2 changes: 1 addition & 1 deletion pyproject.toml
@@ -7,7 +7,7 @@ requires-python = ">=3.9"
license = "MIT"
authors = [{ name = "OpenAI", email = "[email protected]" }]
dependencies = [
-"openai>=1.107.1,<2",
+"openai>=2.2,<3",
"pydantic>=2.10, <3",
"griffe>=1.5.6, <2",
"typing-extensions>=4.12.2, <5",
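The dependency bump is what drives every other change in this diff: openai-python 2.x retires the `NOT_GIVEN` sentinel in favor of `omit` (the value) and `Omit` (its type) for leaving optional fields out of a request. A minimal sketch of the convention the new code adopts, mirroring the `_non_null_or_omit` helper introduced below (names illustrative, not the library's code):

```python
from __future__ import annotations

from typing import Any

from openai import Omit, omit


def non_null_or_omit(value: Any) -> Any:
    # None here means "not configured": translating it to `omit` keeps
    # the field out of the JSON payload entirely, rather than risking
    # an explicit null being serialized.
    return value if value is not None else omit


temperature: float | Omit = non_null_or_omit(None)
assert temperature is omit
```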
4 changes: 2 additions & 2 deletions src/agents/extensions/models/litellm_model.py
@@ -18,7 +18,7 @@
"dependency group: `pip install 'openai-agents[litellm]'`."
) from _e

-from openai import NOT_GIVEN, AsyncStream, NotGiven
+from openai import AsyncStream, NotGiven, omit
from openai.types.chat import (
ChatCompletionChunk,
ChatCompletionMessageCustomToolCall,
@@ -374,7 +374,7 @@ async def _fetch_response(
object="response",
output=[],
tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-if tool_choice != NOT_GIVEN
+if tool_choice is not omit
else "auto",
top_p=model_settings.top_p,
temperature=model_settings.temperature,
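Note the comparison change riding along with the rename: the old code tested `!= NOT_GIVEN`, the new code tests `is not omit`. Since `omit` is a module-level sentinel instance, identity is the unambiguous check; equality can be fooled by objects with a permissive `__eq__`. A small sketch of the idiom (assuming `omit` behaves as a singleton, which the diff's `is not` checks rely on):

```python
from openai import omit


def describe(tool_choice: object) -> str:
    # Sentinels are compared by identity, not equality.
    if tool_choice is not omit:
        return f"caller chose {tool_choice!r}"
    return "tool_choice omitted"


assert describe("auto") == "caller chose 'auto'"
assert describe(omit) == "tool_choice omitted"
```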
15 changes: 9 additions & 6 deletions src/agents/models/chatcmpl_converter.py
@@ -4,7 +4,7 @@
from collections.abc import Iterable
from typing import Any, Literal, cast

-from openai import NOT_GIVEN, NotGiven
+from openai import Omit, omit
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
@@ -54,9 +54,9 @@ class Converter:
@classmethod
def convert_tool_choice(
cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+) -> ChatCompletionToolChoiceOptionParam | Omit:
if tool_choice is None:
-return NOT_GIVEN
+return omit
elif isinstance(tool_choice, MCPToolChoice):
raise UserError("MCPToolChoice is not supported for Chat Completions models")
elif tool_choice == "auto":
@@ -76,9 +76,9 @@
@classmethod
def convert_response_format(
cls, final_output_schema: AgentOutputSchemaBase | None
-) -> ResponseFormat | NotGiven:
+) -> ResponseFormat | Omit:
if not final_output_schema or final_output_schema.is_plain_text():
-return NOT_GIVEN
+return omit

return {
"type": "json_schema",
@@ -506,10 +506,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
# 5) function call output => tool message
elif func_output := cls.maybe_function_tool_call_output(item):
flush_assistant_message()
+output_content = cast(
+str | Iterable[ResponseInputContentParam], func_output["output"]
+)
msg: ChatCompletionToolMessageParam = {
"role": "tool",
"tool_call_id": func_output["call_id"],
-"content": func_output["output"],
+"content": cls.extract_text_content(output_content),
}
result.append(msg)

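Two behaviors change in this converter: tool-choice conversion now returns `omit` instead of `NOT_GIVEN` when nothing was set, and function-tool outputs are routed through `extract_text_content` so structured output parts collapse to the plain string a Chat Completions tool message expects. A hedged sketch of the first contract — its observable behavior, not the library's actual implementation:

```python
from __future__ import annotations

from openai import Omit, omit


def convert_tool_choice(tool_choice: str | None) -> str | dict | Omit:
    # None -> omit the field; known literals pass through; any other
    # string is treated as the name of a specific function tool.
    if tool_choice is None:
        return omit
    if tool_choice in ("auto", "required", "none"):
        return tool_choice
    return {"type": "function", "function": {"name": tool_choice}}


assert convert_tool_choice(None) is omit
assert convert_tool_choice("auto") == "auto"
assert convert_tool_choice("lookup") == {
    "type": "function",
    "function": {"name": "lookup"},
}
```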
53 changes: 27 additions & 26 deletions src/agents/models/openai_chatcompletions.py
@@ -3,9 +3,9 @@
import json
import time
from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload

-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
+from openai import AsyncOpenAI, AsyncStream, Omit, omit
from openai.types import ChatModel
from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
from openai.types.chat.chat_completion import Choice
@@ -44,8 +44,8 @@ def __init__(
self.model = model
self._client = openai_client

-def _non_null_or_not_given(self, value: Any) -> Any:
-return value if value is not None else NOT_GIVEN
+def _non_null_or_omit(self, value: Any) -> Any:
+return value if value is not None else omit

async def get_response(
self,
@@ -243,13 +243,12 @@ async def _fetch_response(
if tracing.include_data():
span.span_data.input = converted_messages

-parallel_tool_calls = (
-True
-if model_settings.parallel_tool_calls and tools and len(tools) > 0
-else False
-if model_settings.parallel_tool_calls is False
-else NOT_GIVEN
-)
+if model_settings.parallel_tool_calls and tools:
+parallel_tool_calls: bool | Omit = True
+elif model_settings.parallel_tool_calls is False:
+parallel_tool_calls = False
+else:
+parallel_tool_calls = omit
tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
response_format = Converter.convert_response_format(output_schema)

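The rewritten block above replaces a hard-to-read nested conditional expression with an explicit tri-state: `True` when parallel calls were requested and tools actually exist, `False` when explicitly disabled, and `omit` when unset so the field never reaches the wire. The same logic in isolation (helper name hypothetical):

```python
from __future__ import annotations

from openai import Omit, omit


def resolve_parallel_tool_calls(
    setting: bool | None, tools: list | None
) -> bool | Omit:
    if setting and tools:
        return True  # explicitly enabled and there are tools to run
    if setting is False:
        return False  # explicitly disabled: forward the False
    return omit  # unset (or no tools): leave the field out entirely


assert resolve_parallel_tool_calls(True, ["get_weather"]) is True
assert resolve_parallel_tool_calls(False, None) is False
assert resolve_parallel_tool_calls(True, []) is omit  # nothing to parallelize
assert resolve_parallel_tool_calls(None, ["get_weather"]) is omit
```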
@@ -259,6 +258,7 @@
converted_tools.append(Converter.convert_handoff_tool(handoff))

converted_tools = _to_dump_compatible(converted_tools)
+tools_param = converted_tools if converted_tools else omit

if _debug.DONT_LOG_MODEL_DATA:
logger.debug("Calling LLM")
@@ -288,28 +288,30 @@
self._get_client(), model_settings, stream=stream
)

+stream_param: Literal[True] | Omit = True if stream else omit
+
ret = await self._get_client().chat.completions.create(
model=self.model,
messages=converted_messages,
-tools=converted_tools or NOT_GIVEN,
-temperature=self._non_null_or_not_given(model_settings.temperature),
-top_p=self._non_null_or_not_given(model_settings.top_p),
-frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
-presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
-max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+tools=tools_param,
+temperature=self._non_null_or_omit(model_settings.temperature),
+top_p=self._non_null_or_omit(model_settings.top_p),
+frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
+presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
+max_tokens=self._non_null_or_omit(model_settings.max_tokens),
tool_choice=tool_choice,
response_format=response_format,
parallel_tool_calls=parallel_tool_calls,
-stream=stream,
-stream_options=self._non_null_or_not_given(stream_options),
-store=self._non_null_or_not_given(store),
-reasoning_effort=self._non_null_or_not_given(reasoning_effort),
-verbosity=self._non_null_or_not_given(model_settings.verbosity),
-top_logprobs=self._non_null_or_not_given(model_settings.top_logprobs),
+stream=cast(Any, stream_param),
+stream_options=self._non_null_or_omit(stream_options),
+store=self._non_null_or_omit(store),
+reasoning_effort=self._non_null_or_omit(reasoning_effort),
+verbosity=self._non_null_or_omit(model_settings.verbosity),
+top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
extra_headers=self._merge_headers(model_settings),
extra_query=model_settings.extra_query,
extra_body=model_settings.extra_body,
-metadata=self._non_null_or_not_given(model_settings.metadata),
+metadata=self._non_null_or_omit(model_settings.metadata),
**(model_settings.extra_args or {}),
)

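The `stream=cast(Any, stream_param)` line deserves a note: the SDK's `create` method is overloaded on the literal type of `stream` to select the streaming or non-streaming return type, and the union `Literal[True] | Omit` matches neither overload, so the value is cast for the type checker; at runtime the client still receives either `True` or the sentinel. A sketch of the pattern:

```python
from __future__ import annotations

from typing import Any, Literal, cast

from openai import Omit, omit

stream = False  # runtime flag from the caller

# Send stream=true only when streaming; otherwise omit the field
# entirely instead of sending an explicit false.
stream_param: Literal[True] | Omit = True if stream else omit

# cast(Any, ...) sidesteps overload resolution on the union type.
kwargs: dict[str, Any] = {"stream": cast(Any, stream_param)}
assert kwargs["stream"] is omit
```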
@@ -319,14 +321,13 @@ async def _fetch_response(
responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
model_settings.tool_choice
)
-if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+if responses_tool_choice is None or responses_tool_choice is omit:
# For Responses API data compatibility with Chat Completions patterns,
# we need to set "none" if tool_choice is absent.
# Without this fix, you'll get the following error:
# pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
# tool_choice.literal['none','auto','required']
# Input should be 'none', 'auto' or 'required'
-# [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
# see also: https://github.com/openai/openai-agents-python/issues/980
responses_tool_choice = "auto"

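One subtlety preserved from the old code: when this Chat Completions model fabricates a `Response` object for tracing, an absent tool choice must be normalized before Pydantic validation, since `Response.tool_choice` only validates the literals named in the comment above. A sketch of the guard with illustrative names:

```python
from openai import omit

converted = omit  # what the converter returns for tool_choice=None

# Normalize the "absent" sentinel to the API default before the
# Response model is built, or Pydantic rejects it with a literal_error.
tool_choice = "auto" if converted is None or converted is omit else converted
assert tool_choice == "auto"
```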
58 changes: 30 additions & 28 deletions src/agents/models/openai_responses.py
@@ -6,7 +6,7 @@
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, cast, overload

-from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
+from openai import APIStatusError, AsyncOpenAI, AsyncStream, Omit, omit
from openai.types import ChatModel
from openai.types.responses import (
Response,
@@ -69,8 +69,8 @@ def __init__(
self.model = model
self._client = openai_client

-def _non_null_or_not_given(self, value: Any) -> Any:
-return value if value is not None else NOT_GIVEN
+def _non_null_or_omit(self, value: Any) -> Any:
+return value if value is not None else omit

async def get_response(
self,
@@ -249,13 +249,12 @@ async def _fetch_response(
list_input = ItemHelpers.input_to_new_input_list(input)
list_input = _to_dump_compatible(list_input)

-parallel_tool_calls = (
-True
-if model_settings.parallel_tool_calls and tools and len(tools) > 0
-else False
-if model_settings.parallel_tool_calls is False
-else NOT_GIVEN
-)
+if model_settings.parallel_tool_calls and tools:
+parallel_tool_calls: bool | Omit = True
+elif model_settings.parallel_tool_calls is False:
+parallel_tool_calls = False
+else:
+parallel_tool_calls = omit

tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
converted_tools = Converter.convert_tools(tools, handoffs)
@@ -297,36 +296,39 @@
if model_settings.top_logprobs is not None:
extra_args["top_logprobs"] = model_settings.top_logprobs
if model_settings.verbosity is not None:
-if response_format != NOT_GIVEN:
+if response_format is not omit:
response_format["verbosity"] = model_settings.verbosity # type: ignore [index]
else:
response_format = {"verbosity": model_settings.verbosity}

-return await self._client.responses.create(
-previous_response_id=self._non_null_or_not_given(previous_response_id),
-conversation=self._non_null_or_not_given(conversation_id),
-instructions=self._non_null_or_not_given(system_instructions),
+stream_param: Literal[True] | Omit = True if stream else omit
+
+response = await self._client.responses.create(
+previous_response_id=self._non_null_or_omit(previous_response_id),
+conversation=self._non_null_or_omit(conversation_id),
+instructions=self._non_null_or_omit(system_instructions),
model=self.model,
input=list_input,
include=include,
tools=converted_tools_payload,
-prompt=self._non_null_or_not_given(prompt),
-temperature=self._non_null_or_not_given(model_settings.temperature),
-top_p=self._non_null_or_not_given(model_settings.top_p),
-truncation=self._non_null_or_not_given(model_settings.truncation),
-max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
+prompt=self._non_null_or_omit(prompt),
+temperature=self._non_null_or_omit(model_settings.temperature),
+top_p=self._non_null_or_omit(model_settings.top_p),
+truncation=self._non_null_or_omit(model_settings.truncation),
+max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
tool_choice=tool_choice,
parallel_tool_calls=parallel_tool_calls,
-stream=stream,
+stream=cast(Any, stream_param),
extra_headers=self._merge_headers(model_settings),
extra_query=model_settings.extra_query,
extra_body=model_settings.extra_body,
text=response_format,
-store=self._non_null_or_not_given(model_settings.store),
-reasoning=self._non_null_or_not_given(model_settings.reasoning),
-metadata=self._non_null_or_not_given(model_settings.metadata),
+store=self._non_null_or_omit(model_settings.store),
+reasoning=self._non_null_or_omit(model_settings.reasoning),
+metadata=self._non_null_or_omit(model_settings.metadata),
**extra_args,
)
+return cast(Response | AsyncStream[ResponseStreamEvent], response)

def _get_client(self) -> AsyncOpenAI:
if self._client is None:
@@ -351,9 +353,9 @@ class Converter:
@classmethod
def convert_tool_choice(
cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
-) -> response_create_params.ToolChoice | NotGiven:
+) -> response_create_params.ToolChoice | Omit:
if tool_choice is None:
-return NOT_GIVEN
+return omit
elif isinstance(tool_choice, MCPToolChoice):
return {
"server_label": tool_choice.server_label,
@@ -404,9 +406,9 @@ def convert_tool_choice(
@classmethod
def get_response_format(
cls, output_schema: AgentOutputSchemaBase | None
-) -> ResponseTextConfigParam | NotGiven:
+) -> ResponseTextConfigParam | Omit:
if output_schema is None or output_schema.is_plain_text():
-return NOT_GIVEN
+return omit
else:
return {
"format": {
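The Responses-side edits mirror the Chat Completions ones, with one extra wrinkle: `verbosity` is merged into the response-format payload, which now requires an identity check against `omit` before mutating it. A behavioral sketch (function and dict shape illustrative, matching the diff above):

```python
from __future__ import annotations

from typing import Any

from openai import Omit, omit


def merge_verbosity(
    response_format: dict[str, Any] | Omit, verbosity: str | None
) -> dict[str, Any] | Omit:
    if verbosity is None:
        return response_format
    if response_format is not omit:
        response_format["verbosity"] = verbosity  # extend existing payload
        return response_format
    return {"verbosity": verbosity}  # create one just for verbosity


assert merge_verbosity(omit, "low") == {"verbosity": "low"}
assert merge_verbosity({"format": {"type": "text"}}, "low") == {
    "format": {"type": "text"},
    "verbosity": "low",
}
```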
1 change: 1 addition & 0 deletions tests/fake_model.py
@@ -253,6 +253,7 @@ async def stream_response(
item_id=output_item.call_id,
output_index=output_index,
arguments=output_item.arguments,
+name=output_item.name,
sequence_number=sequence_number,
)
sequence_number += 1
4 changes: 2 additions & 2 deletions tests/test_model_payload_iterators.py
@@ -5,7 +5,7 @@

import httpx
import pytest
-from openai import NOT_GIVEN
+from openai import omit
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.responses import ToolParam

@@ -82,7 +82,7 @@ class DummyCompletions:
async def create(self, **kwargs):
captured_kwargs.update(kwargs)
_force_materialization(kwargs["messages"])
-if kwargs["tools"] is not NOT_GIVEN:
+if kwargs["tools"] is not omit:
_force_materialization(kwargs["tools"])
return ChatCompletion(
id="dummy-id",
20 changes: 10 additions & 10 deletions tests/test_openai_chatcompletions.py
@@ -5,7 +5,7 @@

import httpx
import pytest
-from openai import NOT_GIVEN, AsyncOpenAI
+from openai import AsyncOpenAI, omit
from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_message import ChatCompletionMessage
@@ -285,17 +285,17 @@ def __init__(self, completions: DummyCompletions) -> None:
assert result is chat
# Ensure expected args were passed through to OpenAI client.
kwargs = completions.kwargs
-assert kwargs["stream"] is False
-assert kwargs["store"] is NOT_GIVEN
+assert kwargs["stream"] is omit
+assert kwargs["store"] is omit
assert kwargs["model"] == "gpt-4"
assert kwargs["messages"][0]["role"] == "system"
assert kwargs["messages"][0]["content"] == "sys"
assert kwargs["messages"][1]["role"] == "user"
-# Defaults for optional fields become the NOT_GIVEN sentinel
-assert kwargs["tools"] is NOT_GIVEN
-assert kwargs["tool_choice"] is NOT_GIVEN
-assert kwargs["response_format"] is NOT_GIVEN
-assert kwargs["stream_options"] is NOT_GIVEN
+# Defaults for optional fields become the omit sentinel
+assert kwargs["tools"] is omit
+assert kwargs["tool_choice"] is omit
+assert kwargs["response_format"] is omit
+assert kwargs["stream_options"] is omit


@pytest.mark.asyncio
@@ -340,8 +340,8 @@ def __init__(self, completions: DummyCompletions) -> None:
)
# Check OpenAI client was called for streaming
assert completions.kwargs["stream"] is True
-assert completions.kwargs["store"] is NOT_GIVEN
-assert completions.kwargs["stream_options"] is NOT_GIVEN
+assert completions.kwargs["store"] is omit
+assert completions.kwargs["stream_options"] is omit
# Response is a proper openai Response
assert isinstance(response, Response)
assert response.id == FAKE_RESPONSES_ID
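The assertion changes here are not mechanical renames: `kwargs["stream"]` was previously asserted `is False`, but the new request builder omits the field for non-streaming calls, so the test now pins the sentinel instead. A minimal sketch of the behavior the updated tests lock in (helper name hypothetical):

```python
from openai import omit


def build_stream_kwarg(stream: bool) -> object:
    # Mirrors the new behavior: stream=true when streaming, otherwise
    # the field is omitted rather than sent as an explicit false.
    return True if stream else omit


assert build_stream_kwarg(True) is True
assert build_stream_kwarg(False) is omit  # was `is False` before this PR
```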