From fb4372381fb0496c03badf7abb9546deebfe1232 Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Wed, 8 Oct 2025 15:10:25 -0700
Subject: [PATCH 1/5] Migrate openai from 1.x to 2.2.0

---
 pyproject.toml | 2 +-
 uv.lock        | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 28982acee..0df03f1d5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "support@openai.com" }]
 dependencies = [
-    "openai>=1.107.1,<2",
+    "openai>=2.2,<3",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",
diff --git a/uv.lock b/uv.lock
index fe98e513a..58710d7e8 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1858,7 +1858,7 @@ wheels = [
 
 [[package]]
 name = "openai"
-version = "1.107.1"
+version = "2.2.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "anyio" },
     { name = "distro" },
     { name = "httpx" },
     { name = "jiter" },
     { name = "pydantic" },
     { name = "sniffio" },
     { name = "tqdm" },
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/f3/e0/a62daa7ff769df969cc1b782852cace79615039630b297005356f5fb46fb/openai-1.107.1.tar.gz", hash = "sha256:7c51b6b8adadfcf5cada08a613423575258b180af5ad4bc2954b36ebc0d3ad48", size = 563671, upload-time = "2025-09-10T15:04:40.288Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/b8/b1/8201e321a7d64a25c6f5a560320272d8be70547add40311fceb916518632/openai-2.2.0.tar.gz", hash = "sha256:bc49d077a8bf0e370eec4d038bc05e232c20855a19df0b58e5b3e5a8da7d33e0", size = 588512, upload-time = "2025-10-06T18:08:13.665Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/d4/12/32c19999a58eec4a695e8ce334442b6135df949f0bb61b2ceaa4fa60d3a9/openai-1.107.1-py3-none-any.whl", hash = "sha256:168f9885b1b70d13ada0868a0d0adfd538c16a02f7fd9fe063851a2c9a025e72", size = 945177, upload-time = "2025-09-10T15:04:37.782Z" },
+    { url = "https://files.pythonhosted.org/packages/cb/92/6aeef1836e66dfec7f7f160a4f06d7041be7f6ccfc47a2f0f5738b332245/openai-2.2.0-py3-none-any.whl", hash = "sha256:d222e63436e33f3134a3d7ce490dc2d2f146fa98036eb65cc225df3ce163916f", size = 998972, upload-time = "2025-10-06T18:08:11.775Z" },
 ]
 
 [[package]]
@@ -1952,7 +1952,7 @@ requires-dist = [
     { name = "litellm", marker = "extra == 'litellm'", specifier = ">=1.67.4.post1,<2" },
     { name = "mcp", marker = "python_full_version >= '3.10'", specifier = ">=1.11.0,<2" },
     { name = "numpy", marker = "python_full_version >= '3.10' and extra == 'voice'", specifier = ">=2.2.0,<3" },
-    { name = "openai", specifier = ">=1.107.1,<2" },
+    { name = "openai", specifier = ">=2.2,<3" },
     { name = "pydantic", specifier = ">=2.10,<3" },
     { name = "redis", marker = "extra == 'redis'", specifier = ">=6.4.0" },
     { name = "requests", specifier = ">=2.0,<3" },
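
Note between patches 1 and 2: the dependency bump above is mechanical, but it sets up the real work of this series. In openai 2.x, request arguments that should be left out of the HTTP payload entirely are expressed with the `omit` sentinel (typed as `Omit`), which the following patches adopt in place of 1.x's `NOT_GIVEN`. A minimal sketch of the pattern, assuming openai>=2.2 is installed (the helper name mirrors the one introduced in patch 2):

    from openai import omit

    def non_null_or_omit(value):
        # `omit` removes the argument from the request payload entirely;
        # passing None instead would serialize an explicit JSON null.
        return value if value is not None else omit
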
From e4fe1e0a2378cf03c8c23d3a3d26f6caf44c1f5e Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Thu, 9 Oct 2025 08:24:26 -0700
Subject: [PATCH 2/5] more migrations;

---
 src/agents/extensions/models/litellm_model.py |  4 +-
 src/agents/models/chatcmpl_converter.py       | 15 +++--
 src/agents/models/openai_chatcompletions.py   | 53 ++++++++---------
 src/agents/models/openai_responses.py         | 58 ++++++++++---------
 tests/test_model_payload_iterators.py         |  4 +-
 tests/test_openai_chatcompletions.py          | 20 +++----
 .../test_openai_chatcompletions_converter.py  | 13 ++---
 tests/test_openai_responses_converter.py      | 14 ++---
 8 files changed, 93 insertions(+), 88 deletions(-)

diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index a4c8da3ab..d11fd0792 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -18,7 +18,7 @@
         "dependency group: `pip install 'openai-agents[litellm]'`."
     ) from _e
 
-from openai import NOT_GIVEN, AsyncStream, NotGiven
+from openai import AsyncStream, NotGiven, omit
 from openai.types.chat import (
     ChatCompletionChunk,
     ChatCompletionMessageCustomToolCall,
@@ -374,7 +374,7 @@ async def _fetch_response(
             object="response",
             output=[],
             tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-            if tool_choice != NOT_GIVEN
+            if tool_choice is not omit
             else "auto",
             top_p=model_settings.top_p,
             temperature=model_settings.temperature,
diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py
index c82491ab6..ddcaa6ecd 100644
--- a/src/agents/models/chatcmpl_converter.py
+++ b/src/agents/models/chatcmpl_converter.py
@@ -4,7 +4,7 @@
 from collections.abc import Iterable
 from typing import Any, Literal, cast
 
-from openai import NOT_GIVEN, NotGiven
+from openai import Omit, omit
 from openai.types.chat import (
     ChatCompletionAssistantMessageParam,
     ChatCompletionContentPartImageParam,
@@ -54,9 +54,9 @@ class Converter:
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-    ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+    ) -> ChatCompletionToolChoiceOptionParam | Omit:
         if tool_choice is None:
-            return NOT_GIVEN
+            return omit
         elif isinstance(tool_choice, MCPToolChoice):
             raise UserError("MCPToolChoice is not supported for Chat Completions models")
         elif tool_choice == "auto":
@@ -76,9 +76,9 @@ def convert_tool_choice(
     @classmethod
     def convert_response_format(
         cls, final_output_schema: AgentOutputSchemaBase | None
-    ) -> ResponseFormat | NotGiven:
+    ) -> ResponseFormat | Omit:
         if not final_output_schema or final_output_schema.is_plain_text():
-            return NOT_GIVEN
+            return omit
 
         return {
             "type": "json_schema",
@@ -506,10 +506,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
             # 5) function call output => tool message
             elif func_output := cls.maybe_function_tool_call_output(item):
                 flush_assistant_message()
+                output_content = cast(
+                    str | Iterable[ResponseInputContentParam], func_output["output"]
+                )
                 msg: ChatCompletionToolMessageParam = {
                     "role": "tool",
                     "tool_call_id": func_output["call_id"],
-                    "content": func_output["output"],
+                    "content": cls.extract_text_content(output_content),
                 }
                 result.append(msg)
 
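
Note on the converter changes above: `convert_tool_choice` and `convert_response_format` now advertise `Omit` in their return annotations and return the `omit` value itself. Because `omit` is a sentinel object, callers test for it by identity rather than equality, as the litellm hunk's `tool_choice is not omit` shows. A short usage sketch (import path taken from the diff header above):

    from openai import omit

    from agents.models.chatcmpl_converter import Converter

    assert Converter.convert_tool_choice(None) is omit  # identity, not ==
    assert Converter.convert_tool_choice("auto") == "auto"
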
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 206510c8d..d6cf662d2 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -3,9 +3,9 @@
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
+from openai import AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
 from openai.types.chat.chat_completion import Choice
@@ -44,8 +44,8 @@ def __init__(
         self.model = model
         self._client = openai_client
 
-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit
 
     async def get_response(
         self,
@@ -243,13 +243,12 @@ async def _fetch_response(
         if tracing.include_data():
             span.span_data.input = converted_messages
 
-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
 
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         response_format = Converter.convert_response_format(output_schema)
@@ -259,6 +258,7 @@ async def _fetch_response(
             converted_tools.append(Converter.convert_handoff_tool(handoff))
 
         converted_tools = _to_dump_compatible(converted_tools)
+        tools_param = converted_tools if converted_tools else omit
 
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
@@ -288,28 +288,30 @@ async def _fetch_response(
             self._get_client(), model_settings, stream=stream
         )
 
+        stream_param: Literal[True] | Omit = True if stream else omit
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
-            tools=converted_tools or NOT_GIVEN,
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
-            presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
-            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            tools=tools_param,
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
+            presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
-            stream_options=self._non_null_or_not_given(stream_options),
-            store=self._non_null_or_not_given(store),
-            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-            verbosity=self._non_null_or_not_given(model_settings.verbosity),
-            top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
+            stream=cast(Any, stream_param),
+            stream_options=self._non_null_or_omit(stream_options),
+            store=self._non_null_or_omit(store),
+            reasoning_effort=self._non_null_or_omit(reasoning_effort),
+            verbosity=self._non_null_or_omit(model_settings.verbosity),
+            top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **(model_settings.extra_args or {}),
         )
 
@@ -319,14 +321,13 @@ async def _fetch_response(
         responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
             model_settings.tool_choice
        )
-        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+        if responses_tool_choice is None or responses_tool_choice is omit:
             # For Responses API data compatibility with Chat Completions patterns,
             # we need to set "none" if tool_choice is absent.
             # Without this fix, you'll get the following error:
             # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
             # tool_choice.literal['none','auto','required']
             #   Input should be 'none', 'auto' or 'required'
-            #   [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
             # see also: https://github.com/openai/openai-agents-python/issues/980
             responses_tool_choice = "auto"
 
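
Note on the `parallel_tool_calls` rewrite above: unfolding the nested conditional expression into `if`/`elif`/`else` is not just cosmetic; it gives the variable a single annotated assignment (`bool | Omit`) that type checkers can verify against the new sentinel. The same tri-state logic, restated as a standalone helper (a sketch, not code from the patch; the `__future__` import keeps the `|` annotation legal on Python 3.9):

    from __future__ import annotations

    from openai import Omit, omit

    def resolve_parallel_tool_calls(setting: bool | None, tools: list) -> bool | Omit:
        if setting and tools:      # explicitly enabled and tools are present
            return True
        elif setting is False:     # explicitly disabled
            return False
        return omit                # unset: leave the field out of the request
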
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index de8cd93ff..74ebd1c5a 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -6,7 +6,7 @@
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
-from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
+from openai import APIStatusError, AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
 from openai.types.responses import (
     Response,
@@ -69,8 +69,8 @@ def __init__(
         self.model = model
         self._client = openai_client
 
-    def _non_null_or_not_given(self, value: Any) -> Any:
-        return value if value is not None else NOT_GIVEN
+    def _non_null_or_omit(self, value: Any) -> Any:
+        return value if value is not None else omit
 
     async def get_response(
         self,
@@ -249,13 +249,12 @@ async def _fetch_response(
         list_input = ItemHelpers.input_to_new_input_list(input)
         list_input = _to_dump_compatible(list_input)
 
-        parallel_tool_calls = (
-            True
-            if model_settings.parallel_tool_calls and tools and len(tools) > 0
-            else False
-            if model_settings.parallel_tool_calls is False
-            else NOT_GIVEN
-        )
+        if model_settings.parallel_tool_calls and tools:
+            parallel_tool_calls: bool | Omit = True
+        elif model_settings.parallel_tool_calls is False:
+            parallel_tool_calls = False
+        else:
+            parallel_tool_calls = omit
 
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         converted_tools = Converter.convert_tools(tools, handoffs)
@@ -297,36 +296,39 @@ async def _fetch_response(
         if model_settings.top_logprobs is not None:
             extra_args["top_logprobs"] = model_settings.top_logprobs
         if model_settings.verbosity is not None:
-            if response_format != NOT_GIVEN:
+            if response_format is not omit:
                 response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
             else:
                 response_format = {"verbosity": model_settings.verbosity}
 
+        stream_param: Literal[True] | Omit = True if stream else omit
+
-        return await self._client.responses.create(
-            previous_response_id=self._non_null_or_not_given(previous_response_id),
-            conversation=self._non_null_or_not_given(conversation_id),
-            instructions=self._non_null_or_not_given(system_instructions),
+        response = await self._client.responses.create(
+            previous_response_id=self._non_null_or_omit(previous_response_id),
+            conversation=self._non_null_or_omit(conversation_id),
+            instructions=self._non_null_or_omit(system_instructions),
             model=self.model,
             input=list_input,
             include=include,
             tools=converted_tools_payload,
-            prompt=self._non_null_or_not_given(prompt),
-            temperature=self._non_null_or_not_given(model_settings.temperature),
-            top_p=self._non_null_or_not_given(model_settings.top_p),
-            truncation=self._non_null_or_not_given(model_settings.truncation),
-            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+            prompt=self._non_null_or_omit(prompt),
+            temperature=self._non_null_or_omit(model_settings.temperature),
+            top_p=self._non_null_or_omit(model_settings.top_p),
+            truncation=self._non_null_or_omit(model_settings.truncation),
+            max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
-            stream=stream,
+            stream=cast(Any, stream_param),
             extra_headers=self._merge_headers(model_settings),
             extra_query=model_settings.extra_query,
             extra_body=model_settings.extra_body,
             text=response_format,
-            store=self._non_null_or_not_given(model_settings.store),
-            reasoning=self._non_null_or_not_given(model_settings.reasoning),
-            metadata=self._non_null_or_not_given(model_settings.metadata),
+            store=self._non_null_or_omit(model_settings.store),
+            reasoning=self._non_null_or_omit(model_settings.reasoning),
+            metadata=self._non_null_or_omit(model_settings.metadata),
             **extra_args,
         )
+        return cast(Response | AsyncStream[ResponseStreamEvent], response)
 
     def _get_client(self) -> AsyncOpenAI:
         if self._client is None:
@@ -351,9 +353,9 @@ class Converter:
     @classmethod
     def convert_tool_choice(
         cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-    ) -> response_create_params.ToolChoice | NotGiven:
+    ) -> response_create_params.ToolChoice | Omit:
         if tool_choice is None:
-            return NOT_GIVEN
+            return omit
         elif isinstance(tool_choice, MCPToolChoice):
             return {
                 "server_label": tool_choice.server_label,
@@ -404,9 +406,9 @@ def convert_tool_choice(
     @classmethod
     def get_response_format(
         cls, output_schema: AgentOutputSchemaBase | None
-    ) -> ResponseTextConfigParam | NotGiven:
+    ) -> ResponseTextConfigParam | Omit:
         if output_schema is None or output_schema.is_plain_text():
-            return NOT_GIVEN
+            return omit
         else:
             return {
                 "format": {
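
Note on `stream=cast(Any, stream_param)` above: the SDK overloads `responses.create()` (and `chat.completions.create()`) so that `stream: Literal[True]` yields a streaming return type while an omitted `stream` yields a plain response. A variable typed `Literal[True] | Omit` matches neither overload, so the call site casts it to `Any` and re-narrows the result with an explicit `cast` on the return value. A self-contained illustration of the same typing situation, using a hypothetical `create` rather than the SDK's:

    from __future__ import annotations

    from typing import Any, Literal, cast, overload

    @overload
    def create(*, stream: Literal[True]) -> list[str]: ...
    @overload
    def create(*, stream: Literal[False] = ...) -> str: ...
    def create(*, stream: bool = False) -> list[str] | str:
        return ["chunk", "chunk"] if stream else "whole response"

    stream_flag = len("abc") > 1  # imagine a flag only known at runtime
    # Neither overload accepts a plain bool, so a type checker rejects
    # create(stream=stream_flag); casting to Any sidesteps the overload match.
    result = create(stream=cast(Any, stream_flag))
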
diff --git a/tests/test_model_payload_iterators.py b/tests/test_model_payload_iterators.py
index 9f4557601..3d7b9edc6 100644
--- a/tests/test_model_payload_iterators.py
+++ b/tests/test_model_payload_iterators.py
@@ -5,7 +5,7 @@
 import httpx
 import pytest
-from openai import NOT_GIVEN
+from openai import omit
 from openai.types.chat.chat_completion import ChatCompletion
 from openai.types.responses import ToolParam
 
@@ -82,7 +82,7 @@ class DummyCompletions:
         async def create(self, **kwargs):
             captured_kwargs.update(kwargs)
             _force_materialization(kwargs["messages"])
-            if kwargs["tools"] is not NOT_GIVEN:
+            if kwargs["tools"] is not omit:
                 _force_materialization(kwargs["tools"])
             return ChatCompletion(
                 id="dummy-id",
diff --git a/tests/test_openai_chatcompletions.py b/tests/test_openai_chatcompletions.py
index 340d9306e..3a0f75364 100644
--- a/tests/test_openai_chatcompletions.py
+++ b/tests/test_openai_chatcompletions.py
@@ -5,7 +5,7 @@
 import httpx
 import pytest
-from openai import NOT_GIVEN, AsyncOpenAI
+from openai import AsyncOpenAI, omit
 from openai.types.chat.chat_completion import ChatCompletion, Choice
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
 from openai.types.chat.chat_completion_message import ChatCompletionMessage
@@ -285,17 +285,17 @@ def __init__(self, completions: DummyCompletions) -> None:
     assert result is chat
     # Ensure expected args were passed through to OpenAI client.
     kwargs = completions.kwargs
-    assert kwargs["stream"] is False
-    assert kwargs["store"] is NOT_GIVEN
+    assert kwargs["stream"] is omit
+    assert kwargs["store"] is omit
     assert kwargs["model"] == "gpt-4"
     assert kwargs["messages"][0]["role"] == "system"
     assert kwargs["messages"][0]["content"] == "sys"
     assert kwargs["messages"][1]["role"] == "user"
-    # Defaults for optional fields become the NOT_GIVEN sentinel
-    assert kwargs["tools"] is NOT_GIVEN
-    assert kwargs["tool_choice"] is NOT_GIVEN
-    assert kwargs["response_format"] is NOT_GIVEN
-    assert kwargs["stream_options"] is NOT_GIVEN
+    # Defaults for optional fields become the omit sentinel
+    assert kwargs["tools"] is omit
+    assert kwargs["tool_choice"] is omit
+    assert kwargs["response_format"] is omit
+    assert kwargs["stream_options"] is omit
 
 
 @pytest.mark.asyncio
@@ -340,8 +340,8 @@ def __init__(self, completions: DummyCompletions) -> None:
     )
     # Check OpenAI client was called for streaming
     assert completions.kwargs["stream"] is True
-    assert completions.kwargs["store"] is NOT_GIVEN
-    assert completions.kwargs["stream_options"] is NOT_GIVEN
+    assert completions.kwargs["store"] is omit
+    assert completions.kwargs["stream_options"] is omit
     # Response is a proper openai Response
     assert isinstance(response, Response)
     assert response.id == FAKE_RESPONSES_ID
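
Note on the test updates above: two things changed at once. The omitted-argument assertions now compare against `omit` by identity, and the non-streaming path asserts `kwargs["stream"] is omit` where it previously asserted `is False`, because the client is now called with `stream` left out instead of `stream=False`. A sketch of the resulting test idiom:

    from openai import omit

    def was_sent(captured_kwargs: dict, key: str) -> bool:
        # An argument captured as `omit` never reaches the wire, so treat
        # it the same as an absent key.
        return key in captured_kwargs and captured_kwargs[key] is not omit
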
diff --git a/tests/test_openai_chatcompletions_converter.py b/tests/test_openai_chatcompletions_converter.py
index 1740af3d1..4854def12 100644
--- a/tests/test_openai_chatcompletions_converter.py
+++ b/tests/test_openai_chatcompletions_converter.py
@@ -26,6 +26,7 @@
 from typing import Literal, cast
 
 import pytest
+from openai import omit
 from openai.types.chat import ChatCompletionMessage, ChatCompletionMessageFunctionToolCall
 from openai.types.chat.chat_completion_message_tool_call import Function
 from openai.types.responses import (
@@ -197,12 +198,12 @@ def test_items_to_messages_with_output_message_and_function_call():
 
 def test_convert_tool_choice_handles_standard_and_named_options() -> None:
     """
-    The `Converter.convert_tool_choice` method should return NOT_GIVEN
+    The `Converter.convert_tool_choice` method should return the omit sentinel
     if no choice is provided, pass through values like "auto", "required",
     or "none" unchanged, and translate any other string into a function
     selection dict.
     """
-    assert Converter.convert_tool_choice(None).__class__.__name__ == "NotGiven"
+    assert Converter.convert_tool_choice(None) is omit
     assert Converter.convert_tool_choice("auto") == "auto"
     assert Converter.convert_tool_choice("required") == "required"
     assert Converter.convert_tool_choice("none") == "none"
@@ -214,17 +215,15 @@ def test_convert_tool_choice_handles_standard_and_named_options() -> None:
 
 def test_convert_response_format_returns_not_given_for_plain_text_and_dict_for_schemas() -> None:
     """
-    The `Converter.convert_response_format` method should return NOT_GIVEN
+    The `Converter.convert_response_format` method should return the omit sentinel
     when no output schema is provided or if the output schema indicates
     plain text. For structured output schemas, it should return a dict
     with type `json_schema` and include the generated JSON schema and
     strict flag from the provided `AgentOutputSchema`.
     """
     # when output is plain text (schema None or output_type str), do not include response_format
-    assert Converter.convert_response_format(None).__class__.__name__ == "NotGiven"
-    assert (
-        Converter.convert_response_format(AgentOutputSchema(str)).__class__.__name__ == "NotGiven"
-    )
+    assert Converter.convert_response_format(None) is omit
+    assert Converter.convert_response_format(AgentOutputSchema(str)) is omit
     # For e.g. integer output, we expect a response_format dict
     schema = AgentOutputSchema(int)
     resp_format = Converter.convert_response_format(schema)
""" # when output is plain text (schema None or output_type str), do not include response_format - assert Converter.convert_response_format(None).__class__.__name__ == "NotGiven" - assert ( - Converter.convert_response_format(AgentOutputSchema(str)).__class__.__name__ == "NotGiven" - ) + assert Converter.convert_response_format(None) is omit + assert Converter.convert_response_format(AgentOutputSchema(str)) is omit # For e.g. integer output, we expect a response_format dict schema = AgentOutputSchema(int) resp_format = Converter.convert_response_format(schema) diff --git a/tests/test_openai_responses_converter.py b/tests/test_openai_responses_converter.py index 155239887..f0ae2e816 100644 --- a/tests/test_openai_responses_converter.py +++ b/tests/test_openai_responses_converter.py @@ -15,7 +15,7 @@ the tool choice values accepted by the Responses API, including special types like `file_search` and `web_search`, and falling back to function names for arbitrary string values. -- `get_response_format` returns `openai.NOT_GIVEN` for plain-text response +- `get_response_format` returns `openai.omit` for plain-text response formats and an appropriate format dict when a JSON-structured output schema is provided. - `convert_tools` maps our internal `Tool` dataclasses into the appropriate @@ -24,7 +24,7 @@ """ import pytest -from openai import NOT_GIVEN +from openai import omit from pydantic import BaseModel from agents import ( @@ -49,7 +49,7 @@ def test_convert_tool_choice_standard_values(): to "auto"/"required"/"none" as appropriate, and that special string values map to the appropriate dicts. """ - assert Converter.convert_tool_choice(None) is NOT_GIVEN + assert Converter.convert_tool_choice(None) is omit assert Converter.convert_tool_choice("auto") == "auto" assert Converter.convert_tool_choice("required") == "required" assert Converter.convert_tool_choice("none") == "none" @@ -67,16 +67,16 @@ def test_convert_tool_choice_standard_values(): def test_get_response_format_plain_text_and_json_schema(): """ For plain text output (default, or output type of `str`), the converter - should return NOT_GIVEN, indicating no special response format constraint. + should return omit, indicating no special response format constraint. If an output schema is provided for a structured type, the converter should return a `format` dict with the schema and strictness. The exact JSON schema depends on the output type; we just assert that required keys are present and that we get back the original schema. """ # Default output (None) should be considered plain text. - assert Converter.get_response_format(None) is NOT_GIVEN - # An explicit plain-text schema (str) should also yield NOT_GIVEN. - assert Converter.get_response_format(AgentOutputSchema(str)) is NOT_GIVEN + assert Converter.get_response_format(None) is omit + # An explicit plain-text schema (str) should also yield omit. + assert Converter.get_response_format(AgentOutputSchema(str)) is omit # A model-based schema should produce a format dict. 

From 8c1a1681aabfbf1ca7696fcccba7069624842e36 Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Thu, 9 Oct 2025 09:11:06 -0700
Subject: [PATCH 3/5] fix

---
 tests/fake_model.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/fake_model.py b/tests/fake_model.py
index d86870920..6e13a02a4 100644
--- a/tests/fake_model.py
+++ b/tests/fake_model.py
@@ -253,6 +253,7 @@ async def stream_response(
                     item_id=output_item.call_id,
                     output_index=output_index,
                     arguments=output_item.arguments,
+                    name=output_item.name,
                     sequence_number=sequence_number,
                 )
                 sequence_number += 1

From 0f1441ad7d6af1eb919181225403d98ef18547d2 Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Sat, 11 Oct 2025 08:14:01 +0900
Subject: [PATCH 4/5] fix

---
 src/agents/models/openai_responses.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 74ebd1c5a..36a981404 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -4,7 +4,7 @@
 from collections.abc import AsyncIterator
 from contextvars import ContextVar
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Literal, cast, overload
+from typing import TYPE_CHECKING, Any, Literal, Union, cast, overload
 
 from openai import APIStatusError, AsyncOpenAI, AsyncStream, Omit, omit
 from openai.types import ChatModel
@@ -328,7 +328,7 @@ async def _fetch_response(
             metadata=self._non_null_or_omit(model_settings.metadata),
             **extra_args,
         )
-        return cast(Response | AsyncStream[ResponseStreamEvent], response)
+        return cast(Union[Response, AsyncStream[ResponseStreamEvent]], response)
 
     def _get_client(self) -> AsyncOpenAI:
         if self._client is None:

From c0b979cb8a6bd819c4f12abdb396ca6ed487e96b Mon Sep 17 00:00:00 2001
From: Kazuhiro Sera
Date: Sat, 11 Oct 2025 08:32:42 +0900
Subject: [PATCH 5/5] fix py 3.9 error

---
 .gitignore                              | 1 +
 src/agents/models/chatcmpl_converter.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0dc73ccd2..60782274e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -103,6 +103,7 @@ celerybeat.pid
 .python-version
 .env*
 .venv
+.venv*
 env/
 venv/
 ENV/
diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py
index ddcaa6ecd..a8fd9255c 100644
--- a/src/agents/models/chatcmpl_converter.py
+++ b/src/agents/models/chatcmpl_converter.py
@@ -2,7 +2,7 @@
 
 import json
 from collections.abc import Iterable
-from typing import Any, Literal, cast
+from typing import Any, Literal, Union, cast
 
 from openai import Omit, omit
 from openai.types.chat import (
@@ -507,7 +507,7 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
             elif func_output := cls.maybe_function_tool_call_output(item):
                 flush_assistant_message()
                 output_content = cast(
-                    str | Iterable[ResponseInputContentParam], func_output["output"]
+                    Union[str, Iterable[ResponseInputContentParam]], func_output["output"]
                 )
                 msg: ChatCompletionToolMessageParam = {
                     "role": "tool",
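
Closing note on patches 4 and 5: both exist because the first argument of `typing.cast()` is evaluated at runtime, and PEP 604 `X | Y` unions only gained runtime support in Python 3.10. `from __future__ import annotations` does not help here, since a `cast()` argument is an ordinary expression rather than an annotation — hence the switch to `typing.Union`, which this project's `requires-python = ">=3.9"` floor still needs:

    from typing import Union, cast

    value: object = "tool output"

    text = cast(Union[str, bytes], value)  # works on Python 3.9
    # text = cast(str | bytes, value)      # TypeError on 3.9: types do not support |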