Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -630,6 +630,15 @@ def _process_create_args(

converted_tools = convert_tools(tools)

# Guardrail: structured output (Pydantic model) cannot be combined with tool calling.
# TODO: long-term, this could be a dedicated configuration error type (e.g. IncompatibleModelConfigurationError).
if response_format_value is not None and len(converted_tools) > 0:
raise ValueError(
"Cannot use structured output (output_content_type) together with function tools. "
"The OpenAI structured output API does not support tool calling in this mode. "
"Either remove output_content_type or remove tools."
)

# Process tool_choice parameter
if isinstance(tool_choice, Tool):
if len(tools) == 0:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,12 @@ class MyArgs(BaseModel):
query: str = Field(description="The description.")


class Weather(BaseModel):
    """Tiny response schema used by the issue #7132 structured-output guardrail tests."""

    # A single required string field keeps the generated JSON schema trivial.
    city: str = Field(description="City name.")


class MockChunkDefinition(BaseModel):
    # defining elements for differentiating mocking chunks
chunk_choice: ChunkChoice
Expand Down Expand Up @@ -2508,6 +2514,74 @@ async def test_single_system_message_for_gemini_model() -> None:
assert system_messages[0]["content"] == "I am the only system message"


# --- Issue #7132: guardrail for structured output + tools ---


def _dummy_tool_for_guardrail(city: str) -> str:
"""Minimal tool for testing structured-output vs tools guardrail."""
return f"Weather in {city}"


@pytest.mark.asyncio
async def test_structured_output_with_tools_raises_value_error() -> None:
    """Pydantic json_output + tools must raise ValueError (guardrail for issue #7132)."""
    # The SDK client is a mock: _process_create_args validates arguments before
    # any request is made, so no network access is needed.
    model_info = {
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": ModelFamily.UNKNOWN,
        "structured_output": True,
    }
    client = BaseOpenAIChatCompletionClient(
        client=MagicMock(),
        create_args={"model": "gpt-4o"},
        model_info=model_info,
    )
    weather_tool = FunctionTool(_dummy_tool_for_guardrail, name="get_weather", description="Get weather", strict=True)
    llm_messages: List[LLMMessage] = [UserMessage(content="What is the weather in Paris?", source="user")]

    # Supplying both a Pydantic response format and a non-empty tool list must
    # trip the guardrail before any create-call parameters are assembled.
    with pytest.raises(ValueError) as exc_info:
        client._process_create_args(  # pyright: ignore[reportPrivateUsage]
            messages=llm_messages,
            tools=[weather_tool],
            json_output=Weather,
            extra_create_args={},
            tool_choice="auto",
        )

    assert "Cannot use structured output (output_content_type) together with function tools" in str(exc_info.value)


@pytest.mark.asyncio
async def test_structured_output_without_tools_passes() -> None:
    """Pydantic json_output + no tools must not raise (guardrail does not apply)."""
    # Mocked SDK client: argument processing happens entirely client-side.
    model_info = {
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": ModelFamily.UNKNOWN,
        "structured_output": True,
    }
    client = BaseOpenAIChatCompletionClient(
        client=MagicMock(),
        create_args={"model": "gpt-4o"},
        model_info=model_info,
    )
    llm_messages: List[LLMMessage] = [UserMessage(content="Return Paris.", source="user")]

    params = client._process_create_args(  # pyright: ignore[reportPrivateUsage]
        messages=llm_messages,
        tools=[],
        json_output=Weather,
        extra_create_args={},
        tool_choice="auto",
    )

    # With an empty tool list the guardrail is inert: the Pydantic model flows
    # through unchanged as the response format and no tools are registered.
    assert params.response_format is Weather
    assert params.messages is not None
    assert len(params.tools) == 0


def noop(input: str) -> str:
    """Ignore *input* and always return the constant string ``"done"``."""
    # Parameter name kept as-is (shadows the builtin) to preserve keyword-call compatibility.
    return "done"

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from autogen_ext.models.openai import OpenAIChatCompletionClient
from autogen_core.models import ModelFamily
from autogen_core.tools import FunctionTool
from dotenv import load_dotenv
import os
import asyncio
from pydantic import BaseModel, Field

# Load OPEN_ROUTER_* credentials from a local .env file, if one is present.
load_dotenv()

class weather(BaseModel):
    """Structured-output schema for the sample agent's final answer.

    NOTE(review): the lowercase class name violates PEP 8 (PascalCase), but it
    is referenced by callers as ``output_content_type=weather``, so it is kept.
    """

    city: str = Field(..., description="The city we get weather from")
    # Fixed misspelling in the model-facing description: "Farenheits" -> "Fahrenheit".
    temperature: int = Field(..., description="Temperature in Fahrenheit")

async def main() -> None:
    """Run a sample agent combining structured output with a function tool.

    NOTE(review): this sets both ``output_content_type`` and ``tools`` on the
    same agent — the exact combination targeted by issue #7132 — so it appears
    intended as a reproduction/demonstration script rather than a production
    example. Requires OPEN_ROUTER_BASE_URL / OPEN_ROUTER_API_KEY in the
    environment (loaded via ``load_dotenv`` at module import).
    """
    # Ensure these are set in your .env file
    model_client = OpenAIChatCompletionClient(
        base_url=os.getenv("OPEN_ROUTER_BASE_URL"),
        api_key=os.getenv("OPEN_ROUTER_API_KEY"),
        model="openai/gpt-oss-20b:free", # Or any OpenRouter model
        # Capabilities are declared manually because the model family is unknown.
        model_info={
            "vision": False,
            "function_calling": True,
            "json_output": True,
            "family": ModelFamily.UNKNOWN,
            "structured_output": True,
        }
    )

    async def get_weather(city: str) -> str:
        """Get the weather for a given city."""
        # Canned response; no real weather service is contacted.
        return f"The weather in {city} is 73 degrees and Sunny."

    agent = AssistantAgent(
        name="weather_agent",
        model_client=model_client,
        tools=[FunctionTool(get_weather, description="get weather", strict=True)],
        output_content_type=weather,
        system_message="You are a helpful assistant. Use your function calls",
        reflect_on_tool_use=True,
        model_client_stream=True,
    )

    # Stream the agent run to the console, then release the client's resources.
    await Console(agent.run_stream(task="What is the weather in New York?"))
    await model_client.close()

if __name__ == "__main__":
    # Script entry point: drive the async demo to completion.
    asyncio.run(main())