Merged
37 changes: 27 additions & 10 deletions dspy/clients/base_lm.py
@@ -217,26 +217,43 @@ def _extract_citations_from_response(self, choice):

def _process_response(self, response):
"""Process the response of OpenAI Response API and extract outputs.

Args:
response: OpenAI Response API response
https://platform.openai.com/docs/api-reference/responses/object

Returns:
List of processed outputs
List of processed outputs, which is always of size 1 because the Response API only supports one output.
"""
outputs = []
text_outputs = []
tool_calls = []
reasoning_contents = []

for output_item in response.output:
Collaborator: nit: Maybe add a comment that even though the output field of the responses API is a list, all output items belong to a single choice

Collaborator (Author): done!

if output_item.type == "message":
output_item_type = output_item.type
if output_item_type == "message":
for content_item in output_item.content:
outputs.append(content_item.text)
elif output_item.type == "function_call":
text_outputs.append(content_item.text)
elif output_item_type == "function_call":
tool_calls.append(output_item.model_dump())
elif output_item_type == "reasoning":
if getattr(output_item, "content", None) and len(output_item.content) > 0:
Collaborator (@TomeHirata, Oct 1, 2025): q: is it possible that output_item does not have a content attribute when the item type is reasoning?

Collaborator (Author): Technically it's possible that either content or summary is unset, but again OpenAI does an odd job of documenting the Responses API.

for content_item in output_item.content:
reasoning_contents.append(content_item.text)
elif getattr(output_item, "summary", None) and len(output_item.summary) > 0:
Collaborator: ditto

for summary_item in output_item.summary:
reasoning_contents.append(summary_item.text)

result = {}
if len(text_outputs) > 0:
result["text"] = "".join(text_outputs)
if len(tool_calls) > 0:
result["tool_calls"] = tool_calls
if len(reasoning_contents) > 0:
result["reasoning_content"] = "".join(reasoning_contents)
# All `response.output` items map to one answer, so we return a list of size 1.
return [result]

if tool_calls:
outputs.append({"tool_calls": tool_calls})
return outputs


def inspect_history(n: int = 1):
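To make the new consolidation behavior easy to eyeball outside the diff, here is a minimal, runnable sketch. The free-standing `process_output` helper and the `SimpleNamespace` stand-ins for Response API output items are illustrative only and not part of the PR; the real logic lives on `BaseLM._process_response` and uses `model_dump()` for tool calls.

```python
from types import SimpleNamespace

def process_output(output_items):
    """Simplified stand-in for the _process_response logic in this PR: every
    item in response.output belongs to one choice, so the pieces are merged
    into a single result dict."""
    text_outputs, tool_calls, reasoning_contents = [], [], []
    for item in output_items:
        if item.type == "message":
            text_outputs.extend(c.text for c in item.content)
        elif item.type == "function_call":
            # The PR uses output_item.model_dump(); vars() stands in for plain namespaces.
            tool_calls.append(vars(item))
        elif item.type == "reasoning":
            # Prefer full reasoning content; fall back to the summary blocks.
            blocks = getattr(item, "content", None) or getattr(item, "summary", None) or []
            reasoning_contents.extend(b.text for b in blocks)

    result = {}
    if text_outputs:
        result["text"] = "".join(text_outputs)
    if tool_calls:
        result["tool_calls"] = tool_calls
    if reasoning_contents:
        result["reasoning_content"] = "".join(reasoning_contents)
    return [result]  # always a single-element list

message = SimpleNamespace(
    type="message",
    content=[SimpleNamespace(text="Hello from the Responses API.")],
)
reasoning = SimpleNamespace(
    type="reasoning",
    content=None,
    summary=[SimpleNamespace(text="Dummy reasoning summary.")],
)
print(process_output([message, reasoning]))
# [{'text': 'Hello from the Responses API.',
#   'reasoning_content': 'Dummy reasoning summary.'}]
```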
51 changes: 33 additions & 18 deletions tests/clients/test_lm.py
@@ -10,6 +10,8 @@
from litellm.types.llms.openai import ResponseAPIUsage, ResponsesAPIResponse
from litellm.utils import Choices, Message, ModelResponse
from openai import RateLimitError
from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem
from openai.types.responses.response_reasoning_item import Summary

import dspy
from dspy.utils.dummies import DummyLM
@@ -505,36 +507,49 @@ def test_disable_history():
model="openai/gpt-4o-mini",
)

def test_responses_api(litellm_test_server):
api_base, _ = litellm_test_server
expected_text = "This is a test answer from responses API."

def test_responses_api():
api_response = make_response(
output_blocks=[
{
"id": "msg_1",
"type": "message",
"role": "assistant",
"status": "completed",
"content": [
{"type": "output_text", "text": expected_text, "annotations": []}
],
}
ResponseOutputMessage(
**{
"id": "msg_1",
"type": "message",
"role": "assistant",
"status": "completed",
"content": [
{"type": "output_text", "text": "This is a test answer from responses API.", "annotations": []}
],
},
),
ResponseReasoningItem(
**{
"id": "reasoning_1",
"type": "reasoning",
"summary": [Summary(**{"type": "summary_text", "text": "This is a dummy reasoning."})],
},
),
]
)

with mock.patch("litellm.responses", autospec=True, return_value=api_response) as dspy_responses:
lm = dspy.LM(
model="openai/dspy-test-model",
api_base=api_base,
api_key="fakekey",
model="openai/gpt-5-mini",
model_type="responses",
cache=False,
temperature=1.0,
max_tokens=16000,
)
assert lm("openai query") == [expected_text]
lm_result = lm("openai query")

assert lm_result == [
{
"text": "This is a test answer from responses API.",
"reasoning_content": "This is a dummy reasoning.",
}
]

dspy_responses.assert_called_once()
assert dspy_responses.call_args.kwargs["model"] == "openai/dspy-test-model"
assert dspy_responses.call_args.kwargs["model"] == "openai/gpt-5-mini"
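Beyond the mocked test, a hedged usage sketch of the `model_type="responses"` path from a caller's perspective; the model name, prompt, and credentials are assumptions, and the returned shape follows the assertion above.

```python
import dspy

# Assumes valid OpenAI credentials and a Responses-API-capable model (illustrative name).
lm = dspy.LM("openai/gpt-5-mini", model_type="responses", temperature=1.0, max_tokens=16000)

outputs = lm("Summarize the Responses API in one sentence.")
# `outputs` is a single-element list; the dict carries the answer text and,
# when the model returns reasoning summaries, a "reasoning_content" entry.
print(outputs[0]["text"])
print(outputs[0].get("reasoning_content"))
```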


def test_lm_replaces_system_with_developer_role():