Add max_retries and timeout support to OpenaiChatModel#116
Add max_retries and timeout support to OpenaiChatModel #116 — Wangzy455 wants to merge 7 commits into agentscope-ai:main from
Conversation
Summary of Changes: Hello @Wangzy455, I'm Gemini Code Assist[^1]! I'm currently reviewing this pull request and will post my feedback shortly. In the meantime, here's a summary to help you and other reviewers quickly get up to speed! This pull request primarily enhances the OpenaiChatModel. Highlights
Changelog
Activity
Using Gemini Code AssistThe full guide for Gemini Code Assist can be found on our documentation page, here are some quick tips. Invoking Gemini You can request assistance from Gemini at any point by creating a comment using either
Customization To customize Gemini Code Assist for GitHub experience, repository maintainers can create a configuration file and/or provide a custom code review style guide (such as PEP-8 for Python) by creating and adding files to a Limitations & Feedback Gemini Code Assist may make mistakes. Please leave feedback on any instances where its feedback is incorrect or counter productive. You can react with 👍 and 👎 on @gemini-code-assist comments. If you're interested in giving your feedback about your experience with Gemini Code Assist for Github and other Google products, sign up here. You can also get AI-powered code generation, chat, as well as code reviews directly in the IDE at no cost with the Gemini Code Assist IDE Extension. Footnotes
|
There was a problem hiding this comment.
Code Review
This pull request adds support for max_retries and timeout to the OpenaiChatModel, which is a useful enhancement for controlling the behavior of the OpenAI client. The implementation is correct, and the changes are accompanied by a comprehensive set of new tests. I've suggested a refactoring for the new tests to reduce code duplication and improve maintainability. Additionally, a minor typo was fixed in a prompt string. Overall, this is a good contribution.
| @patch("openjudge.models.openai_chat_model.AsyncOpenAI") | ||
| def test_max_retries_and_timeout_parameters(self, mock_async_openai): | ||
| """Test that max_retries and timeout parameters are passed to AsyncOpenAI client.""" | ||
| # Test with specific max_retries and timeout values | ||
| model = OpenAIChatModel( | ||
| model="gpt-4", | ||
| api_key="test-key", | ||
| max_retries=5, | ||
| timeout=120.0, | ||
| ) | ||
|
|
||
| # Verify AsyncOpenAI was called with correct parameters | ||
| mock_async_openai.assert_called_once() | ||
| call_kwargs = mock_async_openai.call_args[1] | ||
| assert call_kwargs["api_key"] == "test-key" | ||
| assert call_kwargs["max_retries"] == 5 | ||
| assert call_kwargs["timeout"] == 120.0 | ||
|
|
||
| @patch("openjudge.models.openai_chat_model.AsyncOpenAI") | ||
| def test_max_retries_and_timeout_default_values(self, mock_async_openai): | ||
| """Test that max_retries and timeout use SDK defaults when not provided.""" | ||
| # Test without providing max_retries and timeout | ||
| model = OpenAIChatModel( | ||
| model="gpt-4", | ||
| api_key="test-key", | ||
| ) | ||
|
|
||
| # Verify AsyncOpenAI was called | ||
| mock_async_openai.assert_called_once() | ||
| call_kwargs = mock_async_openai.call_args[1] | ||
| assert call_kwargs["api_key"] == "test-key" | ||
| # When None, these parameters should not be in the call_kwargs | ||
| assert "max_retries" not in call_kwargs | ||
| assert "timeout" not in call_kwargs | ||
|
|
||
| @patch("openjudge.models.openai_chat_model.AsyncOpenAI") | ||
| def test_max_retries_and_timeout_with_none_values(self, mock_async_openai): | ||
| """Test that explicitly passing None for max_retries and timeout doesn't add them to client_args.""" | ||
| # Test with explicit None values | ||
| model = OpenAIChatModel( | ||
| model="gpt-4", | ||
| api_key="test-key", | ||
| max_retries=None, | ||
| timeout=None, | ||
| ) | ||
|
|
||
| # Verify AsyncOpenAI was called | ||
| mock_async_openai.assert_called_once() | ||
| call_kwargs = mock_async_openai.call_args[1] | ||
| assert call_kwargs["api_key"] == "test-key" | ||
| # When explicitly None, these parameters should not be in the call_kwargs | ||
| assert "max_retries" not in call_kwargs | ||
| assert "timeout" not in call_kwargs | ||
|
|
||
| @patch("openjudge.models.openai_chat_model.AsyncOpenAI") | ||
| def test_max_retries_only(self, mock_async_openai): | ||
| """Test that only max_retries is passed when timeout is not provided.""" | ||
| # Test with only max_retries | ||
| model = OpenAIChatModel( | ||
| model="gpt-4", | ||
| api_key="test-key", | ||
| max_retries=3, | ||
| ) | ||
|
|
||
| # Verify AsyncOpenAI was called with correct parameters | ||
| mock_async_openai.assert_called_once() | ||
| call_kwargs = mock_async_openai.call_args[1] | ||
| assert call_kwargs["api_key"] == "test-key" | ||
| assert call_kwargs["max_retries"] == 3 | ||
| assert "timeout" not in call_kwargs | ||
|
|
||
| @patch("openjudge.models.openai_chat_model.AsyncOpenAI") | ||
| def test_timeout_only(self, mock_async_openai): | ||
| """Test that only timeout is passed when max_retries is not provided.""" | ||
| # Test with only timeout | ||
| model = OpenAIChatModel( | ||
| model="gpt-4", | ||
| api_key="test-key", | ||
| timeout=30.0, | ||
| ) | ||
|
|
||
| # Verify AsyncOpenAI was called with correct parameters | ||
| mock_async_openai.assert_called_once() | ||
| call_kwargs = mock_async_openai.call_args[1] | ||
| assert call_kwargs["api_key"] == "test-key" | ||
| assert call_kwargs["timeout"] == 30.0 | ||
| assert "max_retries" not in call_kwargs | ||
|
|
There was a problem hiding this comment.
These five tests for max_retries and timeout have very similar structures and contain duplicated code. They can be consolidated into a single parameterized test using pytest.mark.parametrize. This will make the tests more concise, easier to read, and simpler to maintain or extend in the future.
@pytest.mark.parametrize(
"init_kwargs, expected_in_call, not_expected_in_call",
[
(
{"max_retries": 5, "timeout": 120.0},
{"max_retries": 5, "timeout": 120.0},
[],
),
({}, {}, ["max_retries", "timeout"]),
({"max_retries": None, "timeout": None}, {}, ["max_retries", "timeout"]),
({"max_retries": 3}, {"max_retries": 3}, ["timeout"]),
({"timeout": 30.0}, {"timeout": 30.0}, ["max_retries"]),
],
ids=[
"with_retries_and_timeout",
"defaults",
"with_none_values",
"with_retries_only",
"with_timeout_only",
],
)
@patch("openjudge.models.openai_chat_model.AsyncOpenAI")
def test_client_initialization_with_retries_and_timeout(
self,
mock_async_openai,
init_kwargs,
expected_in_call,
not_expected_in_call,
):
"""Test that max_retries and timeout parameters are passed to AsyncOpenAI client correctly."""
OpenAIChatModel(
model="gpt-4",
api_key="test-key",
**init_kwargs,
)
mock_async_openai.assert_called_once()
call_kwargs = mock_async_openai.call_args[1]
assert call_kwargs["api_key"] == "test-key"
for key, value in expected_in_call.items():
assert key in call_kwargs
assert call_kwargs[key] == value
for key in not_expected_in_call:
assert key not in call_kwargs# Conflicts: # openjudge/models/openai_chat_model.py
OpenJudge Version
0.2.0
Description
for #83
Add max_retries and timeout support to OpenaiChatModel
Checklist
Please check the following items before code is ready to be reviewed.
Run the `pre-commit run --all-files` command.