-
Notifications
You must be signed in to change notification settings - Fork 0
Align Codex health check payload with OpenAI Responses protocol #13
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -186,31 +186,39 @@ func TestBuildTestRequest(t *testing.T) { | |
| model string | ||
| expectNonNil bool | ||
| validateModel bool | ||
| expectMax bool | ||
| tokensField string | ||
| expectInput bool | ||
| expectMessage bool | ||
| }{ | ||
| { | ||
| name: "Codex 平台请求", | ||
| platform: "codex", | ||
| model: "gpt-4o-mini", | ||
| expectNonNil: true, | ||
| validateModel: true, | ||
| expectMax: false, | ||
| tokensField: "max_output_tokens", | ||
| expectInput: true, | ||
| expectMessage: false, | ||
| }, | ||
| { | ||
| name: "Claude 平台请求", | ||
| platform: "claude", | ||
| model: "claude-3-5-haiku-20241022", | ||
| expectNonNil: true, | ||
| validateModel: true, | ||
| expectMax: true, | ||
| tokensField: "max_tokens", | ||
| expectInput: false, | ||
| expectMessage: true, | ||
| }, | ||
| { | ||
| name: "映射后的模型名", | ||
| platform: "codex", | ||
| model: "openai/gpt-4o-mini", | ||
| expectNonNil: true, | ||
| validateModel: true, | ||
| expectMax: false, | ||
| tokensField: "max_output_tokens", | ||
| expectInput: true, | ||
| expectMessage: false, | ||
| }, | ||
| } | ||
|
|
||
|
|
@@ -245,12 +253,26 @@ func TestBuildTestRequest(t *testing.T) { | |
| t.Errorf("Expected model %s in request body, got %s", tt.model, model) | ||
| } | ||
|
|
||
| _, hasMaxTokens := reqData["max_tokens"] | ||
| if tt.expectMax && !hasMaxTokens { | ||
| t.Error("Expected max_tokens in request body but not found") | ||
| if tt.tokensField != "" { | ||
| if _, ok := reqData[tt.tokensField]; !ok { | ||
| t.Errorf("Expected %s in request body but not found", tt.tokensField) | ||
| } | ||
| } | ||
| if !tt.expectMax && hasMaxTokens { | ||
| t.Error("max_tokens should not be included for this platform") | ||
|
|
||
| _, hasMessages := reqData["messages"] | ||
| if tt.expectMessage && !hasMessages { | ||
| t.Error("Expected messages in request body but not found") | ||
| } | ||
| if !tt.expectMessage && hasMessages { | ||
| t.Error("messages should not be included for this platform") | ||
| } | ||
|
|
||
| _, hasInput := reqData["input"] | ||
| if tt.expectInput && !hasInput { | ||
| t.Error("Expected input in request body but not found") | ||
| } | ||
| if !tt.expectInput && hasInput { | ||
| t.Error("input should not be included for this platform") | ||
| } | ||
| } | ||
| }) | ||
|
|
@@ -318,23 +340,45 @@ func TestHealthCheck_RequestBodyStructure(t *testing.T) { | |
| t.Fatalf("Failed to parse request body: %v", err) | ||
| } | ||
|
|
||
| // 验证必需字段 | ||
| requiredFields := []string{"model", "max_tokens", "messages"} | ||
| for _, field := range requiredFields { | ||
| if _, ok := reqData[field]; !ok { | ||
| t.Errorf("Required field %s is missing", field) | ||
| if platform == "claude" { | ||
| requiredFields := []string{"model", "max_tokens", "messages"} | ||
| for _, field := range requiredFields { | ||
| if _, ok := reqData[field]; !ok { | ||
| t.Errorf("Required field %s is missing", field) | ||
| } | ||
| } | ||
| } | ||
|
|
||
| // 验证 messages 结构 | ||
| messages, ok := reqData["messages"].([]interface{}) | ||
| if !ok { | ||
| t.Error("messages field is not an array") | ||
| return | ||
| messages, ok := reqData["messages"].([]interface{}) | ||
| if !ok { | ||
| t.Error("messages field is not an array") | ||
| return | ||
| } | ||
| if len(messages) == 0 { | ||
| t.Error("messages array is empty") | ||
| } | ||
| } | ||
|
|
||
| if len(messages) == 0 { | ||
| t.Error("messages array is empty") | ||
| if platform == "codex" { | ||
| requiredFields := []string{"model", "input", "max_output_tokens"} | ||
| for _, field := range requiredFields { | ||
| if _, ok := reqData[field]; !ok { | ||
| t.Errorf("Required field %s is missing", field) | ||
| } | ||
| } | ||
|
|
||
| switch v := reqData["input"].(type) { | ||
| case string: | ||
| if strings.TrimSpace(v) == "" { | ||
| t.Error("input string should not be empty") | ||
| } | ||
| case []interface{}: | ||
| if len(v) == 0 { | ||
| t.Error("input array is empty") | ||
| return | ||
| } | ||
| default: | ||
| t.Errorf("unexpected input type %T", v) | ||
| } | ||
|
Comment on lines +369 to +381
|
||
| } | ||
| }) | ||
| } | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The Codex request format has been updated to use the OpenAI Responses API protocol (input field and max_output_tokens), but the ConnectivityTestService at services/connectivitytestservice.go lines 257-266 still uses the old Chat Completions format (messages field) for /responses endpoints. This inconsistency means health checks and connectivity tests will send different request formats to the same Codex endpoints, which could cause confusion or different behavior between the two services. Consider updating ConnectivityTestService to use the same Responses API format for consistency.