You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
2209 lines
68 KiB
2209 lines
68 KiB
# ---------------------------------------------------------------------------
# Shared constants for the AI Commander / LLM Connection Management layer.
#
# Most string constants below are dictionary keys or UI field labels used by
# the persisted configuration (see CONFIG_DATA later in this module); their
# VALUES are part of the stored contract, so do not change them casually.
# ---------------------------------------------------------------------------

# Generic request/response key.
PAYLOAD = 'payload'

# Defaults applied when a model configuration omits an explicit value.
DEFAULT_MAX_TOKENS = 2000
DEFAULT_TEMPERATURE = 0.5
DEFAULT_TIMEOUT = 600  # seconds

STATUS = 'status'

# Default system prompt sent with chat requests.
SYSTEM_PROMPT = 'You are a helpful assistant'

# Configuration field labels / keys (match the keys inside CONFIG_DATA).
RESPONSE_VARIABILITY = 'Response Variability'
REQUEST_TIMEOUT = 'Request Timeout'
VALUE = 'value'
MAX_RESULT_ROWS = 'Maximum Result Rows'
ENDPOINT = 'Endpoint'
SAVED = 'is_saved'
MODEL_SAVED = 'is_model_saved'

# Cloud-provider credential field labels.
REGION = 'Region'
ACCESS_KEY_ID = 'AWS Access Key Id'
# NOTE: the name below is misspelled ('OPENEAI'); it is kept unchanged for
# backward compatibility with existing importers. Prefer the correctly
# spelled alias AZURE_OPENAI_VERSION in new code.
AZURE_OPENEAI_VERSION = 'Azure API Version'
AZURE_OPENAI_VERSION = AZURE_OPENEAI_VERSION  # correctly spelled alias
BEDROCK_ACCESS_KEY = 'AWS Access Token'
BEDROCK_ROLE = 'Role Arn'
AWS_KEYS = 'aws_keys'
ACCESS_TOKEN = 'Access Token'

PARAMS = 'params'
PROVIDER = 'provider'

# Provider display names.
GROQ = 'Groq'
GEMINI = 'Gemini'
OPENAI = 'OpenAI'
OLLAMA = 'Ollama'
AZUREOPENAI = 'AzureOpenAI'
ANTHROPIC = 'Anthropic'
ANTHROPIC_API_VERSION = '2023-06-01'  # version string used for Anthropic API requests
MISTRALAI = 'MistralAI'
BEDROCK = 'Bedrock'

KB_ID = 'kb_id'
MAX_TOKENS = 'Max Tokens'
SET_AS_DEFAULT = 'Set as default'
CLEAR_PASSWORD = 'clear_password'
MODELS = 'models'

SPLUNK_HOSTED_LLM_TIMEOUT = 600  # seconds
MAX_COMPLETION_TOKENS = 1000

# Lower-case request-body / header keys.
TEMPERATURE = 'temperature'
TIMEOUT = 'timeout'
AUTHORIZATION = 'Authorization'
MAX_OUTPUT_TOKENS = 'maxOutputTokens'

# Supported providers, in the order they are presented.
PROVIDERS = [
    'Splunk Hosted Models',
    'OpenAI',
    'Anthropic',
    'AzureOpenAI',
    'Groq',
    'Gemini',
    'Bedrock',
    'Ollama',
]

MLTK_APP_NAME = 'Splunk_ML_Toolkit'
SPLUNK_HOSTED_LLM = 'Splunk Hosted Models'
COLLECTION_NAME = 'mltk_ai_commander_collection'

# Generic fallback error surfaced when an LLM request fails.
ERROR_MESSAGE = 'Request to the LLM has failed. Please check the provided Connection Management configuration settings.'

# Known error messages that are recognized/surfaced verbatim rather than
# replaced by the generic ERROR_MESSAGE.
LLM_EXCEPTION_LIST = [
    'Authentication failed: Incorrect API key provided. Please check your API key.',
    'The specified model is either unavailable or not supported for the selected API version or method. Please verify the model name and your API version.',
    'Prompt length too long. Please reduce the length of the prompt.',
    'Empty response content received from the server.',
    'The provided model is not accessible by the bedrock account',
    "On-demand usage isn't supported for the selected model. Please choose a supported model.",
    'You exceeded your current quota, please check your plan and billing details.',
    'TimeoutError - Request timed out',
    ERROR_MESSAGE,
]

# Splunk role / capability names guarding the command and config endpoints.
AI_COMMANDER_ROLES = ['mltk_admin']
AI_COMMANDER_COMMAND_CAPABILITIES = ['apply_ai_commander_command']
AI_COMMANDER_EDIT_CONFIG_CAPABILITIES = ['edit_ai_commander_config', 'list_ai_commander_config']
AI_COMMANDER_READ_CONFIG_CAPABILITIES = ['list_ai_commander_config']

# Conf object holding the LLM integration settings.
LLM_INTEGRATION_CONFIG = 'ai:LLMIntegrations'

# Retry-policy keys.
MAX_RETRIES = 'max_retries'
BACKOFF_FACTOR = 'backoff_factor'

# Version of the persisted configuration schema (CONFIG_DATA metadata).
CONFIG_VERSION = '1.0'
|
|
CONFIG_DATA = '''{
|
|
"metadata": {
|
|
"version": "",
|
|
"created_at" : "",
|
|
"modified_at": ""
|
|
},
|
|
"Splunk Hosted Models": {
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
}
|
|
},
|
|
"OpenAI": {
|
|
"Endpoint": {
|
|
"value": "https://api.openai.com/v1/chat/completions",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The URL endpoint to access the OpenAI API for processing chat completions."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": true,
|
|
"description": "The token used for authentication when accessing the OpenAI API."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "The timeout duration for the request to the OpenAI."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
"gpt-4o-mini": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"o1-mini": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"o1-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o-mini-2024-07-18": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o-2024-08-06": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o-2024-05-13": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-turbo": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-turbo-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0125-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-1106-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-1106": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-0301": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-16k": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-16k-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0314": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k-0314": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"Anthropic": {
|
|
"Endpoint": {
|
|
"value": "https://api.anthropic.com/v1/messages",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The API endpoint URL for sending messages to Anthropic's Claude model."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": true,
|
|
"description": "The API key required for authenticating requests to the Anthropic API."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum duration (in seconds) before a request to the Anthropic API times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
"claude-3-5-sonnet": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-3-haiku": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-3-opus": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-3-5-sonnet-20240620": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response"
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-3-sonnet": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response"
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-2.1": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-2": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-instant-1.2": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"claude-instant-1": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"AzureOpenAI": {
|
|
"Endpoint": {
|
|
"value": "https://openai-gpt-4-test-v-1.openai.azure.com/",
|
|
"type": "string",
|
|
"required": true,
|
|
"description": "The API endpoint URL for accessing the Azure OpenAI service for generating chat completions."
|
|
},
|
|
"Azure API Version": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": false,
|
|
"description": "The authentication token required to access Azure OpenAI services."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": true,
|
|
"description": "The authentication token required to access Azure OpenAI services."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "The maximum duration (in seconds) before a request to Azure OpenAI times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
"o1-mini": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"o1-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o-mini": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4o": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0314": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k-0314": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-32k-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-1106-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-4-0125-preview": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-0301": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-16k-0613": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gpt-3.5-turbo-16k": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "float",
|
|
"required": true,
|
|
"description": "Controls the randomness of responses, affecting creativity versus consistency."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"Groq": {
|
|
"Endpoint": {
|
|
"value": "https://api.groq.com/openai/v1/chat/completions",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The API endpoint for sending chat completion requests to Groq's language models."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": true,
|
|
"description": "The authentication token required to access Groq's API services."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum duration (in seconds) before a request to Groq's API times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
"llama3-8b-8192": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"llama3-70b-8192": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"mixtral-8x7b-32768": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response"
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gemma2-9b-it": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response"
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"llama-3.1-8b-instant": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"llama-3.3-70b-versatile": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"llama2-70b-4096": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"Gemini": {
|
|
"Endpoint": {
|
|
"value": "https://generativelanguage.googleapis.com/v1beta/models",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The API endpoint for sending chat completion requests to Google's Gemini language model."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": true,
|
|
"hidden": true,
|
|
"description": "The authentication token required to access the Gemini API."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum duration (in seconds) before a request to the Gemini API times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
"gemini-pro": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gemini-1.5-pro-latest": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gemini-2.0-flash": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gemini-2.0-flash-exp": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
},
|
|
"gemini-2.0-flash-lite-preview-02-05": {
|
|
"Response Variability": {
|
|
"value": 0,
|
|
"type": "int",
|
|
"required": true,
|
|
"description": "Adjusts the response's randomness, impacting how varied or deterministic responses are."
|
|
},
|
|
"Maximum Result Rows": {
|
|
"value": 10,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The maximum number of result entries to retrieve in a response."
|
|
},
|
|
"Max Tokens": {
|
|
"value": 2000,
|
|
"type": "int",
|
|
"required": false,
|
|
"description": "The limit on the number of tokens that can be generated in a response."
|
|
},
|
|
"Set as default": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false
|
|
},
|
|
"is_model_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"Bedrock": {
|
|
"Region": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The AWS region where Amazon Bedrock services are hosted."
|
|
},
|
|
"AWS Access Key Id": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": false,
|
|
"hidden": true,
|
|
"description": "The AWS access key ID used for authenticating requests to Amazon Bedrock."
|
|
},
|
|
"AWS Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": false,
|
|
"hidden": true,
|
|
"description": "The temporary AWS session token used for authentication with Amazon Bedrock."
|
|
},
|
|
"Role Arn": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": false,
|
|
"hidden": true,
|
|
"description": "The Amazon Resource Name (ARN) of the IAM role used for accessing Amazon Bedrock services."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The maximum duration (in seconds) before a request to Amazon Bedrock times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
}
|
|
},
|
|
"Ollama": {
|
|
"Endpoint": {
|
|
"value": "http://localhost:11434/",
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The API endpoint URL for connecting to the Ollama server."
|
|
},
|
|
"Access Token": {
|
|
"value": "",
|
|
"type": "string",
|
|
"required": false,
|
|
"hidden": true,
|
|
"description": "The authentication token required for accessing the Ollama API."
|
|
},
|
|
"Request Timeout": {
|
|
"value": 200,
|
|
"type": "string",
|
|
"required": false,
|
|
"description": "The maximum duration (in seconds) before a request to the Ollama API times out."
|
|
},
|
|
"is_saved": {
|
|
"value": false,
|
|
"type": "boolean",
|
|
"required": false,
|
|
"description": "Is Provider details stored"
|
|
},
|
|
"models": {
|
|
|
|
}
|
|
}
|
|
}'''
|