From 51a7a71dbc626c0cdc0d44ab64f53241521bb947 Mon Sep 17 00:00:00 2001 From: jinjia Date: Mon, 10 Feb 2025 10:53:58 +0200 Subject: [PATCH 1/2] docs(karpor): add api proxy parameters in helm installation document --- docs/karpor/1-getting-started/2-installation.md | 15 ++++++++++++++- .../current/1-getting-started/2-installation.md | 15 ++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/docs/karpor/1-getting-started/2-installation.md b/docs/karpor/1-getting-started/2-installation.md index d09ac2ef..154b1e79 100644 --- a/docs/karpor/1-getting-started/2-installation.md +++ b/docs/karpor/1-getting-started/2-installation.md @@ -118,6 +118,14 @@ helm install karpor-release kusionstack/karpor \ --set server.ai.model=deepseek-chat \ --set server.ai.topP=0.5 \ --set server.ai.temperature=0.2 + +# Example using AI Proxy +helm install karpor kusionstack/karpor \ + --set server.ai.authToken={YOUR_AI_TOKEN} \ + --set server.ai.proxy.enabled=true \ + --set server.ai.proxy.httpProxy={YOUR_HTTP_PROXY} \ + --set server.ai.proxy.httpsProxy={YOUR_HTTPS_PROXY} \ + --set server.ai.proxy.noProxy={YOUR_NO_PROXY} ``` ## Chart Parameters @@ -144,11 +152,16 @@ The Karpor Server Component is main backend server. It itself is an `apiserver`, | Key | Type | Default | Description | |-----|------|---------|-------------| -| server.ai | object | `{"authToken":"","backend":"openai","baseUrl":"","model":"gpt-3.5-turbo","temperature":1,"topP":1}` | AI configuration section. The AI analysis feature requires that [authToken, baseUrl] be assigned values. | +| server.ai | object | `{"authToken":"","backend":"openai","baseUrl":"","model":"gpt-3.5-turbo","proxy":{"enabled":false,"httpProxy":"","httpsProxy":"","noProxy":""},"temperature":1,"topP":1}` | AI configuration section. The AI analysis feature requires that [authToken, baseUrl] be assigned values. | | server.ai.authToken | string | `""` | Authentication token for accessing the AI service. 
| | server.ai.backend | string | `"openai"` | Backend service or platform that the AI model is hosted on. Available options:
- `"openai"`: OpenAI API (default)
- `"azureopenai"`: Azure OpenAI Service
- `"huggingface"`: Hugging Face API
If the backend you are using is compatible with OpenAI, then there is no need to make any changes here. | | server.ai.baseUrl | string | `""` | Base URL of the AI service. e.g., "https://api.openai.com/v1". | | server.ai.model | string | `"gpt-3.5-turbo"` | Name or identifier of the AI model to be used. e.g., "gpt-3.5-turbo". | +| server.ai.proxy | object | `{"enabled":false,"httpProxy":"","httpsProxy":"","noProxy":""}` | Proxy configuration for AI service connections. | +| server.ai.proxy.enabled | bool | `false` | Enable proxy settings for AI service connections. When false, proxy settings will be ignored. | +| server.ai.proxy.httpProxy | string | `""` | HTTP proxy URL for AI service connections (e.g., "http://proxy.example.com:8080"). | +| server.ai.proxy.httpsProxy | string | `""` | HTTPS proxy URL for AI service connections (e.g., "https://proxy.example.com:8080"). | +| server.ai.proxy.noProxy | string | `""` | No proxy configuration for AI service connections (e.g., "localhost,127.0.0.1,example.com"). | | server.ai.temperature | float | `1` | Temperature parameter for the AI model. This controls the randomness of the output, where a higher value (e.g., 1.0) makes the output more random, and a lower value (e.g., 0.0) makes it more deterministic. | | server.ai.topP | float | `1` | Top-p (nucleus sampling) parameter for the AI model. This controls Controls the probability mass to consider for sampling, where a higher value leads to greater diversity in the generated content (typically ranging from 0 to 1) | | server.enableRbac | bool | `false` | Enable RBAC authorization if set to true. 
| diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md index 46ecf282..e0584a18 100644 --- a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md @@ -118,6 +118,14 @@ helm install karpor-release kusionstack/karpor \ --set server.ai.model=deepseek-chat \ --set server.ai.topP=0.5 \ --set server.ai.temperature=0.2 + +# 使用 AI Proxy 的样例 +helm install karpor kusionstack/karpor \ + --set server.ai.authToken={YOUR_AI_TOKEN} \ + --set server.ai.proxy.enabled=true \ + --set server.ai.proxy.httpProxy={YOUR_HTTP_PROXY} \ + --set server.ai.proxy.httpsProxy={YOUR_HTTPS_PROXY} \ + --set server.ai.proxy.noProxy={YOUR_NO_PROXY} ``` ## Chart 参数 @@ -144,11 +152,16 @@ Karpor 服务器组件是主要的后端服务器。它本身是一个 `apiserve | 键 | 类型 | 默认值 | 描述 | |-----|------|---------|-------------| -| server.ai | object | `{"authToken":"","backend":"openai","baseUrl":"","model":"gpt-3.5-turbo","temperature":1,"topP":1}` | AI 配置部分。AI 分析功能需要为 [authToken, baseUrl] 赋值。 | +| server.ai | object | `{"authToken":"","backend":"openai","baseUrl":"","model":"gpt-3.5-turbo","proxy":{"enabled":false,"httpProxy":"","httpsProxy":"","noProxy":""},"temperature":1,"topP":1}` | AI 配置部分。AI 分析功能需要为 [authToken, baseUrl] 赋值。 | | server.ai.authToken | string | `""` | 访问 AI 服务的认证令牌。 | | server.ai.backend | string | `"openai"` | 托管 AI 模型的后端服务或平台。可用选项:
- `"openai"`: OpenAI API(默认)
- `"azureopenai"`: Azure OpenAI 服务
- `"huggingface"`: Hugging Face API
如果您使用的后端与 OpenAI 兼容,则无需在此处进行任何更改。 | | server.ai.baseUrl | string | `""` | AI 服务的基础 URL。例如:"https://api.openai.com/v1"。 | | server.ai.model | string | `"gpt-3.5-turbo"` | 要使用的 AI 模型的名称或标识符。例如:"gpt-3.5-turbo"。 | +| server.ai.proxy | object | `{"enabled":false,"httpProxy":"","httpsProxy":"","noProxy":""}` | AI 服务连接的代理配置。 | +| server.ai.proxy.enabled | bool | `false` | 启用 AI 服务连接的代理设置。如果为 false,则将忽略代理设置。 | +| server.ai.proxy.httpProxy | string | `""` | AI 服务连接的 HTTP 代理 URL(例如“http://proxy.example.com:8080”)。 | +| server.ai.proxy.httpsProxy | string | `""` | AI 服务连接的 HTTPS 代理 URL(例如“https://proxy.example.com:8080”)。 | +| server.ai.proxy.noProxy | string | `""` | AI 服务连接无需代理配置(例如“localhost,127.0.0.1,example.com”)。 | | server.ai.temperature | float | `1` | AI 模型的温度参数。控制输出的随机性,较高的值(例如 1.0)使输出更随机,较低的值(例如 0.0)使输出更确定性。 | | server.ai.topP | float | `1` | AI 模型的 Top-p(核采样)参数。控制采样的概率质量,较高的值导致生成内容的多样性更大(通常范围为 0 到 1)。 | | server.enableRbac | bool | `false` | 如果设置为 true,则启用 RBAC 授权。 | From 15dbdaf879ffb02a0d571e221706b6c034f7cdf9 Mon Sep 17 00:00:00 2001 From: jinjia Date: Mon, 10 Feb 2025 12:24:42 +0200 Subject: [PATCH 2/2] docs(karpor): update api proxy parameters in helm zh installation document --- .../current/1-getting-started/2-installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md index e0584a18..b904f397 100644 --- a/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md +++ b/i18n/zh/docusaurus-plugin-content-docs-karpor/current/1-getting-started/2-installation.md @@ -161,7 +161,7 @@ Karpor 服务器组件是主要的后端服务器。它本身是一个 `apiserve | server.ai.proxy.enabled | bool | `false` | 启用 AI 服务连接的代理设置。如果为 false,则将忽略代理设置。 | | server.ai.proxy.httpProxy | string | `""` | AI 服务连接的 HTTP 代理 URL(例如“http://proxy.example.com:8080”)。 | | 
server.ai.proxy.httpsProxy | string | `""` | AI 服务连接的 HTTPS 代理 URL(例如“https://proxy.example.com:8080”)。 | -| server.ai.proxy.noProxy | string | `""` | AI 服务连接无需代理配置(例如“localhost,127.0.0.1,example.com”)。 | +| server.ai.proxy.noProxy | string | `""` | 不需要通过代理服务器进行访问的域名(例如“localhost,127.0.0.1,example.com”)。 | | server.ai.temperature | float | `1` | AI 模型的温度参数。控制输出的随机性,较高的值(例如 1.0)使输出更随机,较低的值(例如 0.0)使输出更确定性。 | | server.ai.topP | float | `1` | AI 模型的 Top-p(核采样)参数。控制采样的概率质量,较高的值导致生成内容的多样性更大(通常范围为 0 到 1)。 | | server.enableRbac | bool | `false` | 如果设置为 true,则启用 RBAC 授权。 |