Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,31 @@ def to_json(
warnings=warnings,
)

def as_input(
    self,
    *,
    warnings: bool = True,
) -> dict[str, object]:
    """Return this model serialized as a plain dict usable as an API input item.

    Fields that were never set and fields whose value is `None` are omitted,
    and any keys named in the model's `__api_exclude__` attribute (when
    present) are removed before returning, mirroring request serialization.
    """
    payload = self.model_dump(
        mode="json",
        by_alias=True,
        exclude_unset=True,
        exclude_none=True,
        warnings=warnings,
    )

    # Strip output-only keys the model class has opted out of replaying.
    for key in getattr(self, "__api_exclude__", None) or ():
        payload.pop(key, None)

    return cast("dict[str, object]", payload)

@override
def __str__(self) -> str:
# mypy complains about an invalid self arg
Expand Down
14 changes: 13 additions & 1 deletion src/openai/types/responses/response.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing import List, Union, Optional, cast
from typing_extensions import Literal, TypeAlias

from .tool import Tool
Expand All @@ -23,6 +23,7 @@
from .tool_choice_function import ToolChoiceFunction
from ..shared.responses_model import ResponsesModel
from .tool_choice_apply_patch import ToolChoiceApplyPatch
from .response_input_item_param import ResponseInputItemParam

__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"]

Expand Down Expand Up @@ -319,3 +320,14 @@ def output_text(self) -> str:
texts.append(content.text)

return "".join(texts)

def output_as_input(self) -> "List[ResponseInputItemParam]":
    """Serialize this response's `output` items into follow-up `input` items.

    Useful when you manage conversation state manually instead of passing
    `previous_response_id`: each output model is sanitized through its
    `as_input()` helper so output-only or `None` fields are not replayed
    back into `responses.create()` calls.
    """
    items = [output_item.as_input() for output_item in self.output]
    return cast("List[ResponseInputItemParam]", items)
2 changes: 2 additions & 0 deletions src/openai/types/responses/response_reasoning_item.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@ class ResponseReasoningItem(BaseModel):
[managing context](https://platform.openai.com/docs/guides/conversation-state).
"""

__api_exclude__ = {"status"}

id: str
"""The unique identifier of the reasoning content."""

Expand Down
72 changes: 72 additions & 0 deletions tests/lib/responses/test_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@

from openai import OpenAI, AsyncOpenAI
from openai._utils import assert_signatures_in_sync
from openai._compat import parse_obj
from openai.types.responses import Response

from ...conftest import base_url
from ..snapshots import make_snapshot_request
Expand Down Expand Up @@ -41,6 +43,76 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
)


def test_output_as_input() -> None:
    # Raw payload mimicking a `responses.create()` result whose `output`
    # contains both a reasoning item and an assistant message.
    raw_response = {
        "id": "resp_123",
        "object": "response",
        "created_at": 1,
        "model": "o4-mini",
        "output": [
            {
                "id": "rs_123",
                "type": "reasoning",
                "summary": [
                    {
                        "text": "The previous answer established the capital of France.",
                        "type": "summary_text",
                    }
                ],
                "status": None,
                "encrypted_content": None,
            },
            {
                "id": "msg_123",
                "type": "message",
                "role": "assistant",
                "status": "completed",
                "phase": "final_answer",
                "content": [
                    {
                        "type": "output_text",
                        "text": "Paris.",
                        "annotations": [],
                    }
                ],
            },
        ],
        "parallel_tool_calls": True,
        "tool_choice": "auto",
        "tools": [],
    }
    response = parse_obj(Response, raw_response)

    # The reasoning item sheds its `None`-valued `status`/`encrypted_content`;
    # the message keeps all populated fields, including the extra `phase` key.
    expected_items = [
        {
            "id": "rs_123",
            "type": "reasoning",
            "summary": [
                {
                    "text": "The previous answer established the capital of France.",
                    "type": "summary_text",
                }
            ],
        },
        {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "status": "completed",
            "phase": "final_answer",
            "content": [
                {
                    "type": "output_text",
                    "text": "Paris.",
                    "annotations": [],
                }
            ],
        },
    ]
    assert response.output_as_input() == expected_items


@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
Expand Down
80 changes: 79 additions & 1 deletion tests/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,11 @@
import pydantic
from pydantic import Field

from openai._utils import PropertyInfo
from openai._utils import PropertyInfo, maybe_transform
from openai._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
from openai._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem
from openai.types.responses.response_create_params import ResponseCreateParamsNonStreaming


class BasicModel(BaseModel):
Expand Down Expand Up @@ -572,6 +574,82 @@ class Model(BaseModel):
assert isinstance(model_dump(m, warnings=False), dict)


def test_as_input_response_items() -> None:
    """`as_input()` drops `None`-valued fields and keeps everything that was set."""
    reasoning = ResponseReasoningItem.construct(
        id="rs_123",
        type="reasoning",
        summary=[{"text": "Reasoning summary", "type": "summary_text"}],
        status=None,
        encrypted_content=None,
    )
    message = ResponseOutputMessage.construct(
        id="msg_123",
        type="message",
        role="assistant",
        status="completed",
        phase="final_answer",
        content=[{"type": "output_text", "annotations": [], "text": "Paris"}],
    )

    reasoning_input = reasoning.as_input()
    message_input = message.as_input()

    # `status` and `encrypted_content` were explicitly `None`, so they vanish.
    assert reasoning_input == {
        "id": "rs_123",
        "type": "reasoning",
        "summary": [{"text": "Reasoning summary", "type": "summary_text"}],
    }
    # Every set field survives, including the extra `phase` key.
    assert message_input == {
        "id": "msg_123",
        "type": "message",
        "role": "assistant",
        "status": "completed",
        "phase": "final_answer",
        "content": [{"type": "output_text", "annotations": [], "text": "Paris"}],
    }


def test_request_transform_respects_api_exclude_when_reusing_response_items() -> None:
    """Request serialization honors `__api_exclude__` on reused response models."""
    reasoning = ResponseReasoningItem.construct(
        id="rs_123",
        type="reasoning",
        summary=[{"text": "Reasoning summary", "type": "summary_text"}],
        status="completed",
    )
    message = ResponseOutputMessage.construct(
        id="msg_123",
        type="message",
        role="assistant",
        status="completed",
        content=[{"type": "output_text", "annotations": [], "text": "Paris"}],
    )

    params = {
        "input": [reasoning, message],
        "model": "o4-mini",
        "stream": False,
    }
    transformed = maybe_transform(params, ResponseCreateParamsNonStreaming)

    # The reasoning item's `status` is stripped (it is listed in
    # ResponseReasoningItem.__api_exclude__), while the message's `status`
    # passes through untouched.
    expected = {
        "input": [
            {
                "id": "rs_123",
                "type": "reasoning",
                "summary": [{"text": "Reasoning summary", "type": "summary_text"}],
            },
            {
                "id": "msg_123",
                "type": "message",
                "role": "assistant",
                "status": "completed",
                "content": [{"type": "output_text", "annotations": [], "text": "Paris"}],
            },
        ],
        "model": "o4-mini",
        "stream": False,
    }
    assert transformed == expected


def test_to_json() -> None:
class Model(BaseModel):
foo: Optional[str] = Field(alias="FOO", default=None)
Expand Down