-
Notifications
You must be signed in to change notification settings - Fork 2.3k
Description
I get this error when trying to run the basic example for routing. I'm using version 0.0.4 of the Agents SDK and Python 3.9.6.
Describe the bug
Hi! We speak French, Spanish and English. How can I help? Hi! Error streaming response: 'AsyncOpenAI' object has no attribute 'responses' Traceback (most recent call last): File "/Users/valeria/github2025/automl_web/backend/swarms.py", line 73, in <module> asyncio.run(main()) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete return future.result() File "/Users/valeria/github2025/automl_web/backend/swarms.py", line 55, in main async for event in result.stream_events(): File "/Users/valeria/github2025/automl_web/backend/automl/lib/python3.9/site-packages/agents/result.py", line 182, in stream_events raise self._stored_exception File "/Users/valeria/github2025/automl_web/backend/automl/lib/python3.9/site-packages/agents/run.py", line 537, in _run_streamed_impl turn_result = await cls._run_single_turn_streamed( File "/Users/valeria/github2025/automl_web/backend/automl/lib/python3.9/site-packages/agents/run.py", line 639, in _run_single_turn_streamed async for event in model.stream_response( File "/Users/valeria/github2025/automl_web/backend/automl/lib/python3.9/site-packages/agents/models/openai_responses.py", line 141, in stream_response stream = await self._fetch_response( File "/Users/valeria/github2025/automl_web/backend/automl/lib/python3.9/site-packages/agents/models/openai_responses.py", line 230, in _fetch_response return await self._client.responses.create( AttributeError: 'AsyncOpenAI' object has no attribute 'responses'
Repro steps
Basic routing code taken from examples:
import asyncio
import uuid
from openai.types.responses import ResponseContentPartDoneEvent, ResponseTextDeltaEvent
from agents import Agent, RawResponsesStreamEvent, Runner, TResponseInputItem, trace, set_tracing_disabled
from dotenv import load_dotenv
import os
"""
This example shows the handoffs/routing pattern. The triage agent receives the first message, and
then hands off to the appropriate agent based on the language of the request. Responses are
streamed to the user.
"""

# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# This local demo does not need tracing, so switch it off globally.
set_tracing_disabled(disabled=True)
# One specialist agent per supported language; each answers only in its own
# language. The triage agent below hands the conversation to one of them.
english_agent = Agent(
    name="english_agent",
    instructions="You only speak English",
)

spanish_agent = Agent(
    name="spanish_agent",
    instructions="You only speak Spanish",
)

french_agent = Agent(
    name="french_agent",
    instructions="You only speak French",
)

# Entry-point agent: detects the user's language and hands off to the
# matching specialist.
triage_agent = Agent(
    name="triage_agent",
    instructions="Handoff to the appropriate agent based on the language of the request.",
    handoffs=[french_agent, spanish_agent, english_agent],
)
async def main():
    """Run an interactive routing demo, streaming each agent's reply.

    NOTE(review): the reported AttributeError ('AsyncOpenAI' object has no
    attribute 'responses') means the installed `openai` package predates the
    Responses API — upgrading `openai` (and the Agents SDK) resolves it; the
    example code itself is not at fault.
    """
    # A stable ID for this conversation, so each turn's trace is linked
    # together. uuid4().hex is already a str — the original wrapped it in a
    # redundant str() call.
    conversation_id = uuid.uuid4().hex[:16]

    msg = input("Hi! We speak French, Spanish and English. How can I help? ")
    agent = triage_agent
    inputs: list[TResponseInputItem] = [{"content": msg, "role": "user"}]

    while True:
        # Each conversation turn is a single trace. Normally, each input from
        # the user would be an API request to your app, and you can wrap the
        # request in a trace().
        with trace("Routing example", group_id=conversation_id):
            result = Runner.run_streamed(
                agent,
                input=inputs,
            )
            async for event in result.stream_events():
                # Only raw model-response events carry streamed text; skip
                # everything else (tool calls, handoff events, ...).
                if not isinstance(event, RawResponsesStreamEvent):
                    continue
                data = event.data
                if isinstance(data, ResponseTextDeltaEvent):
                    print(data.delta, end="", flush=True)
                elif isinstance(data, ResponseContentPartDoneEvent):
                    print("\n")

        # Carry the full conversation history into the next turn.
        inputs = result.to_input_list()
        print("\n")
        user_msg = input("Enter a message: ")
        inputs.append({"content": user_msg, "role": "user"})
        # Continue with whichever agent the run ended on (post-handoff).
        agent = result.current_agent
if __name__ == "__main__":
    # Script entry point: run the async demo on asyncio's event loop.
    asyncio.run(main())