"description": "Driver name for MySQL, default is mysql+pymysql.",
"defaultValue": "mysql+pymysql"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
"description": "Driver name for oceanbase, default is mysql+ob.",
"defaultValue": "mysql+ob"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
description: "Powerful open-source relational database with extensibility and SQL standards."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "PostgreSQLParameters",
"description": "Powerful open-source relational database with extensibility and SQL standards.",
"documentationUrl": null,
"parameters": [
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
},
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 5432"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for postgres, default is postgresql+psycopg2.",
"defaultValue": "postgresql+psycopg2"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
"description": "Driver name for starrocks, default is starrocks.",
"defaultValue": "starrocks"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
description: "TuGraph is a high-performance graph database jointly developed by Ant Group and Tsinghua University."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "TuGraphParameters",
"description": "TuGraph is a high-performance graph database jointly developed by Ant Group and Tsinghua University.",
"documentationUrl": null,
"parameters": [
{
"name": "host",
"type": "string",
"required": true,
"description": "TuGraph server host"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "TuGraph server user"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
"defaultValue": "${env:DBGPT_DB_PASSWORD}"
},
{
"name": "port",
"type": "integer",
"required": false,
"description": "TuGraph server port, default 7687",
description: "Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of today's data-driven world."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "VerticaParameters",
"description": "Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of today's data-driven world.",
"documentationUrl": null,
"parameters": [
{
"name": "host",
"type": "string",
"required": true,
"description": "Database host, e.g., localhost"
},
{
"name": "port",
"type": "integer",
"required": true,
"description": "Database port, e.g., 5433"
},
{
"name": "user",
"type": "string",
"required": true,
"description": "Database user to connect"
},
{
"name": "database",
"type": "string",
"required": true,
"description": "Database name"
},
{
"name": "driver",
"type": "string",
"required": false,
"description": "Driver name for vertica, default is vertica+vertica_python",
"defaultValue": "vertica+vertica_python"
},
{
"name": "password",
"type": "string",
"required": false,
"description": "Database password, you can write your password directly, of course, you can also use environment variables, such as ${env:DBGPT_DB_PASSWORD}",
"description": "TuGraph is a high-performance graph database jointly developed by Ant Group and Tsinghua University.",
"link": "./conn_tugraph_tugraphparameters_0c844e"
},
{
"name": "VerticaParameters",
"description": "Vertica is a strongly consistent, ACID-compliant, SQL data warehouse, built for the scale and complexity of today's data-driven world.",
"description": "The path of the model, if you want to deploy a local model."
},
{
"name": "device",
"type": "string",
"required": false,
"description": "Device to run model. If None, the device is automatically determined"
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "hf"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "cache_folder",
"type": "string",
"required": false,
"description": "Path of the cache folder."
},
{
"name": "normalize_embeddings",
"type": "boolean",
"required": false,
"description": "Normalize embeddings.",
"defaultValue": "False"
},
{
"name": "multi_process",
"type": "boolean",
"required": false,
"description": "Run encode() on multiple GPUs.",
"defaultValue": "False"
},
{
"name": "model_kwargs",
"type": "object",
"required": false,
"description": "Keyword arguments to pass to the model.",
"defaultValue": "{}"
},
{
"name": "encode_kwargs",
"type": "object",
"required": false,
"description": "Keyword arguments to pass when calling the `encode` method.",
"defaultValue": "{}"
},
{
"name": "embed_instruction",
"type": "string",
"required": false,
"description": "Instruction to use for embedding documents. Just for Instructor model."
},
{
"name": "query_instruction",
"type": "string",
"required": false,
"description": "Instruction to use for embedding query. Just for Instructor model."
description: "OpenAPI embedding deploy model parameters."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "OpenAPIEmbeddingDeployModelParameters",
"description": "OpenAPI embedding deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
description: "Jina AI Embeddings deploy model parameters."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "JinaEmbeddingsDeployModelParameters",
"description": "Jina AI Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
description: "Ollama Embeddings deploy model parameters."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "OllamaEmbeddingDeployModelParameters",
"description": "Ollama Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/ollama"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_url",
"type": "string",
"required": false,
"description": "The URL of the embeddings API.",
"defaultValue": "http://localhost:11434"
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
description: "Qianfan Embeddings deploy model parameters."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "QianfanEmbeddingDeployModelParameters",
"description": "Qianfan Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/qianfan"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "api_secret",
"type": "string",
"required": false,
"description": "The Secret key for the embeddings API. It's the sk for qianfan."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
description: "Tongyi Embeddings deploy model parameters."
---
import { ConfigDetail } from "@site/src/components/mdx/ConfigDetail";
<ConfigDetail config={{
"name": "TongyiEmbeddingDeployModelParameters",
"description": "Tongyi Embeddings deploy model parameters.",
"documentationUrl": "",
"parameters": [
{
"name": "name",
"type": "string",
"required": true,
"description": "The name of the model."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/tongyi"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "api_key",
"type": "string",
"required": false,
"description": "The API key for the embeddings API."
},
{
"name": "backend",
"type": "string",
"required": false,
"description": "The real model name to pass to the provider, default is None. If backend is None, use name as the real model name."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/baichuan"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "reasoning_model",
"type": "boolean",
"required": false,
"description": "Whether the model is a reasoning model. If None, it is automatically determined from model."
},
{
"name": "api_base",
"type": "string",
"required": false,
"description": "The base url of the Baichuan API."
},
{
"name": "provider",
"type": "string",
"required": false,
"description": "The provider of the model. If model is deployed in local, this is the inference type. If model is deployed in third-party service, this is platform name('proxy/<platform>')",
"defaultValue": "proxy/openai"
},
{
"name": "verbose",
"type": "boolean",
"required": false,
"description": "Show verbose output.",
"defaultValue": "False"
},
{
"name": "concurrency",
"type": "integer",
"required": false,
"description": "Model concurrency limit",
"defaultValue": "100"
},
{
"name": "prompt_template",
"type": "string",
"required": false,
"description": "Prompt template. If None, the prompt template is automatically determined from model. Just for local deployment."
},
{
"name": "context_length",
"type": "integer",
"required": false,
"description": "The context length of the OpenAI API. If None, it is determined by the model."
},
{
"name": "reasoning_model",
"type": "boolean",
"required": false,
"description": "Whether the model is a reasoning model. If None, it is automatically determined from model."