2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "0.4.16"
".": "0.4.17"
}
10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog

## 0.4.17 (2025-09-29)

Full Changelog: [v0.4.16...v0.4.17](https://github.com/scaleapi/agentex-python/compare/v0.4.16...v0.4.17)

### Chores

* **internal:** codegen related update ([2fdc0e7](https://github.com/scaleapi/agentex-python/commit/2fdc0e75ea3874cf896cdbb119b50a4165b2e942))
* **internal:** version bump ([0a59ad4](https://github.com/scaleapi/agentex-python/commit/0a59ad40b55b3577ef2addcea2fe4c0e4f002d49))
* **internal:** version bump ([6174ef1](https://github.com/scaleapi/agentex-python/commit/6174ef1573a539f5e0f57bc625a67da31311afb6))

## 0.4.16 (2025-09-16)

Full Changelog: [v0.4.15...v0.4.16](https://github.com/scaleapi/agentex-python/compare/v0.4.15...v0.4.16)
3 changes: 1 addition & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "agentex-sdk"
version = "0.4.16"
version = "0.4.17"
description = "The official Python library for the agentex API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -86,7 +86,6 @@ dev-dependencies = [
"dirty-equals>=0.6.0",
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
"nest_asyncio==1.6.0",
"pytest-xdist>=3.6.1",
"debugpy>=1.8.15",
]
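Note: the `nest_asyncio` pin removed here appears to have had a single consumer — the subprocess-based `test_get_platform` regression test that this PR also deletes from `tests/test_client.py` (see the simplified test further down).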
2 changes: 1 addition & 1 deletion src/agentex/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "agentex"
__version__ = "0.4.16" # x-release-please-version
__version__ = "0.4.17" # x-release-please-version
90 changes: 55 additions & 35 deletions src/agentex/lib/core/services/adk/providers/openai.py
@@ -358,7 +358,6 @@ async def run_agent_auto_send(
},
) as span:
heartbeat_if_in_workflow("run agent auto send")

async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
@@ -396,12 +395,9 @@ async def run_agent_auto_send(
result = await Runner.run(
starting_agent=agent, input=input_list, previous_response_id=previous_response_id
)
else:
result = await Runner.run(starting_agent=agent, input=input_list)

if span:
span.output = {
"new_items": [
item.raw_item.model_dump()
if isinstance(item.raw_item, BaseModel)
else item.raw_item
item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
for item in result.new_items
],
@@ -431,7 +427,6 @@ async def run_agent_auto_send(

elif item.type == "tool_call_item":
tool_call_item = item.raw_item

# Extract tool call information using the helper method
call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
tool_call_map[call_id] = tool_call_item
@@ -557,9 +552,15 @@ async def run_agent_streamed(
) as span:
heartbeat_if_in_workflow("run agent streamed")

async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
async with mcp_server_context(
mcp_server_params, mcp_timeout_seconds
) as servers:
tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
handoffs = (
[Agent(**handoff.model_dump()) for handoff in handoffs]
if handoffs
else []
)
agent_kwargs = {
"name": agent_name,
"instructions": agent_instructions,
@@ -572,7 +573,9 @@ async def run_agent_streamed(
"tool_use_behavior": tool_use_behavior,
}
if model_settings is not None:
agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
agent_kwargs["model_settings"] = (
model_settings.to_oai_model_settings()
)
if input_guardrails is not None:
agent_kwargs["input_guardrails"] = input_guardrails
if output_guardrails is not None:
@@ -600,7 +603,9 @@ async def run_agent_streamed(
if span:
span.output = {
"new_items": [
item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
item.raw_item.model_dump()
if isinstance(item.raw_item, BaseModel)
else item.raw_item
for item in result.new_items
],
"final_output": result.final_output,
@@ -733,7 +738,6 @@ async def run_agent_streamed_auto_send(
if event.type == "run_item_stream_event":
if event.item.type == "tool_call_item":
tool_call_item = event.item.raw_item

# Extract tool call information using the helper method
call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
tool_call_map[call_id] = tool_call_item
@@ -746,10 +750,12 @@ async def run_agent_streamed_auto_send(
)

# Create tool request using streaming context (immediate completion)
async with self.streaming_service.streaming_task_message_context(
task_id=task_id,
initial_content=tool_request_content,
) as streaming_context:
async with (
self.streaming_service.streaming_task_message_context(
task_id=task_id,
initial_content=tool_request_content,
) as streaming_context
):
# The message has already been persisted, but we still need to send an update
await streaming_context.stream_update(
update=StreamTaskMessageFull(
@@ -775,9 +781,12 @@ async def run_agent_streamed_auto_send(
)

# Create tool response using streaming context (immediate completion)
async with self.streaming_service.streaming_task_message_context(
task_id=task_id, initial_content=tool_response_content
) as streaming_context:
async with (
self.streaming_service.streaming_task_message_context(
task_id=task_id,
initial_content=tool_response_content
) as streaming_context
):
# The message has already been persisted, but we still need to send an update
await streaming_context.stream_update(
update=StreamTaskMessageFull(
@@ -803,10 +812,14 @@ async def run_agent_streamed_auto_send(
),
)
# Open the streaming context
item_id_to_streaming_context[item_id] = await streaming_context.open()
item_id_to_streaming_context[
item_id
] = await streaming_context.open()
unclosed_item_ids.add(item_id)
else:
streaming_context = item_id_to_streaming_context[item_id]
streaming_context = item_id_to_streaming_context[
item_id
]

# Stream the delta through the streaming service
await streaming_context.stream_update(
@@ -836,10 +849,14 @@ async def run_agent_streamed_auto_send(
),
)
# Open the streaming context
item_id_to_streaming_context[item_id] = await streaming_context.open()
item_id_to_streaming_context[
item_id
] = await streaming_context.open()
unclosed_item_ids.add(item_id)
else:
streaming_context = item_id_to_streaming_context[item_id]
streaming_context = item_id_to_streaming_context[
item_id
]

# Stream the summary delta through the streaming service
await streaming_context.stream_update(
@@ -873,10 +890,14 @@ async def run_agent_streamed_auto_send(
),
)
# Open the streaming context
item_id_to_streaming_context[item_id] = await streaming_context.open()
item_id_to_streaming_context[
item_id
] = await streaming_context.open()
unclosed_item_ids.add(item_id)
else:
streaming_context = item_id_to_streaming_context[item_id]
streaming_context = item_id_to_streaming_context[
item_id
]

# Stream the content delta through the streaming service
await streaming_context.stream_update(
@@ -904,7 +925,6 @@ async def run_agent_streamed_auto_send(
# to close the streaming context, but they do!!!
# They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent
# I have no idea why they do this.

elif isinstance(event.data, ResponseReasoningTextDoneEvent):
# Handle reasoning content text completion
item_id = event.data.item_id
@@ -920,7 +940,9 @@ async def run_agent_streamed_auto_send(

# Finish the streaming context (sends DONE event and updates message)
if item_id in item_id_to_streaming_context:
streaming_context = item_id_to_streaming_context[item_id]
streaming_context = item_id_to_streaming_context[
item_id
]
await streaming_context.close()
if item_id in unclosed_item_ids:
unclosed_item_ids.remove(item_id)
@@ -930,17 +952,17 @@ async def run_agent_streamed_auto_send(
# Create a copy to avoid modifying set during iteration
remaining_items = list(unclosed_item_ids)
for item_id in remaining_items:
if (
item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
): # Check if still unclosed
streaming_context = item_id_to_streaming_context[item_id]
if (item_id in unclosed_item_ids and
item_id in item_id_to_streaming_context): # Check if still unclosed
streaming_context = item_id_to_streaming_context[
item_id
]
await streaming_context.close()
unclosed_item_ids.discard(item_id)

except InputGuardrailTripwireTriggered as e:
# Handle guardrail trigger by sending a rejection message
rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."

# Try to extract rejection message from the guardrail result
if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
output_info = getattr(e.guardrail_result.output, "output_info", {})
@@ -971,7 +993,6 @@ async def run_agent_streamed_auto_send(
type="full",
),
)

# Re-raise to let the activity handle it
raise

@@ -1009,7 +1030,6 @@ async def run_agent_streamed_auto_send(
type="full",
),
)

# Re-raise to let the activity handle it
raise

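Most of the `openai.py` hunks are mechanical re-wraps onto shorter lines, including the parenthesized `async with (...)` groupings (valid syntax on Python 3.10+). The recurring shape they reformat is worth calling out: each streamed `item_id` lazily opens one streaming context, receives deltas through it, and is closed exactly once, with a final sweep for anything no `*DoneEvent` closed. Below is a minimal sketch of that pattern, not the SDK's actual control flow: `route_delta` and `close_remaining` are hypothetical helper names, and the context object is assumed to expose the `open()`, `stream_update()`, and `close()` coroutines the diff shows, with `open()` returning the ready context.

```python
# Hypothetical condensation of the per-item streaming pattern in the diff.
from typing import Any, Dict, Set


async def route_delta(
    streaming_service: Any,  # assumed: exposes streaming_task_message_context()
    task_id: str,
    item_id: str,
    update: Any,  # e.g. a StreamTaskMessageDelta payload
    item_id_to_streaming_context: Dict[str, Any],
    unclosed_item_ids: Set[str],
) -> None:
    if item_id not in item_id_to_streaming_context:
        # First delta for this item: open a context and track it for cleanup.
        streaming_context = streaming_service.streaming_task_message_context(
            task_id=task_id,
        )
        streaming_context = await streaming_context.open()
        item_id_to_streaming_context[item_id] = streaming_context
        unclosed_item_ids.add(item_id)
    else:
        streaming_context = item_id_to_streaming_context[item_id]
    await streaming_context.stream_update(update=update)


async def close_remaining(
    item_id_to_streaming_context: Dict[str, Any],
    unclosed_item_ids: Set[str],
) -> None:
    # Copy before iterating so discard() doesn't mutate the live set,
    # mirroring the diff's list(unclosed_item_ids) copy.
    for item_id in list(unclosed_item_ids):
        streaming_context = item_id_to_streaming_context.get(item_id)
        if streaming_context is not None:
            await streaming_context.close()
        unclosed_item_ids.discard(item_id)
```

The final sweep corresponds to the cleanup loop near the end of `run_agent_streamed_auto_send`, which guards against reasoning events that never emit a matching done event.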
1 change: 1 addition & 0 deletions src/agentex/lib/sdk/fastacp/fastacp.py
@@ -23,6 +23,7 @@

logger = make_logger(__name__)


class FastACP:
"""Factory for creating FastACP instances

53 changes: 6 additions & 47 deletions tests/test_client.py
@@ -6,13 +6,10 @@
import os
import sys
import json
import time
import asyncio
import inspect
import subprocess
import tracemalloc
from typing import Any, Union, cast
from textwrap import dedent
from unittest import mock
from typing_extensions import Literal

@@ -23,14 +20,17 @@

from agentex import Agentex, AsyncAgentex, APIResponseValidationError
from agentex._types import Omit
from agentex._utils import asyncify
from agentex._models import BaseModel, FinalRequestOptions
from agentex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from agentex._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
OtherPlatform,
DefaultHttpxClient,
DefaultAsyncHttpxClient,
get_platform,
make_request_options,
)

@@ -1643,50 +1643,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

assert response.http_request.headers.get("x-stainless-retry-count") == "42"

def test_get_platform(self) -> None:
# A previous implementation of asyncify could leave threads unterminated when
# used with nest_asyncio.
#
# Since nest_asyncio.apply() is global and cannot be un-applied, this
# test is run in a separate process to avoid affecting other tests.
test_code = dedent("""
import asyncio
import nest_asyncio
import threading

from agentex._utils import asyncify
from agentex._base_client import get_platform

async def test_main() -> None:
result = await asyncify(get_platform)()
print(result)
for thread in threading.enumerate():
print(thread.name)

nest_asyncio.apply()
asyncio.run(test_main())
""")
with subprocess.Popen(
[sys.executable, "-c", test_code],
text=True,
) as process:
timeout = 10 # seconds

start_time = time.monotonic()
while True:
return_code = process.poll()
if return_code is not None:
if return_code != 0:
raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")

# success
break

if time.monotonic() - start_time > timeout:
process.kill()
raise AssertionError("calling get_platform using asyncify resulted in a hung process")

time.sleep(0.1)
async def test_get_platform(self) -> None:
platform = await asyncify(get_platform)()
assert isinstance(platform, (str, OtherPlatform))

async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
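The rewritten test drops the subprocess harness entirely: it existed only to guard against a `nest_asyncio` interaction in which `asyncify` could leave worker threads unterminated, and `nest_asyncio` is no longer a dev dependency. For context, an `asyncify` helper of this kind is typically a thin wrapper that pushes a blocking call onto a worker thread. A minimal sketch follows, assuming an anyio-based design; the real `agentex._utils.asyncify` may differ.

```python
# Minimal sketch of an asyncify() helper; assumes an anyio-based design and
# is NOT the actual agentex._utils implementation.
import functools
from typing import Awaitable, Callable, TypeVar

import anyio.to_thread

T = TypeVar("T")


def asyncify(function: Callable[..., T]) -> Callable[..., Awaitable[T]]:
    @functools.wraps(function)
    async def wrapper(*args: object, **kwargs: object) -> T:
        # Run the blocking function on a worker thread so the running event
        # loop is never blocked; partial() carries the call's arguments.
        return await anyio.to_thread.run_sync(
            functools.partial(function, *args, **kwargs)
        )

    return wrapper
```

Under that design, `await asyncify(get_platform)()` simply awaits the worker thread's result, which is exactly what the new three-line test asserts on.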