diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 3a5048bc..9ce77a7d 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.4.16"
+  ".": "0.4.17"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e582ba1b..29546073 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Changelog
 
+## 0.4.17 (2025-09-29)
+
+Full Changelog: [v0.4.16...v0.4.17](https://github.com/scaleapi/agentex-python/compare/v0.4.16...v0.4.17)
+
+### Chores
+
+* **internal:** codegen related update ([2fdc0e7](https://github.com/scaleapi/agentex-python/commit/2fdc0e75ea3874cf896cdbb119b50a4165b2e942))
+* **internal:** version bump ([0a59ad4](https://github.com/scaleapi/agentex-python/commit/0a59ad40b55b3577ef2addcea2fe4c0e4f002d49))
+* **internal:** version bump ([6174ef1](https://github.com/scaleapi/agentex-python/commit/6174ef1573a539f5e0f57bc625a67da31311afb6))
+
 ## 0.4.16 (2025-09-16)
 
 Full Changelog: [v0.4.15...v0.4.16](https://github.com/scaleapi/agentex-python/compare/v0.4.15...v0.4.16)
diff --git a/pyproject.toml b/pyproject.toml
index 953a3320..a26cee0d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "agentex-sdk"
-version = "0.4.16"
+version = "0.4.17"
 description = "The official Python library for the agentex API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -86,7 +86,6 @@ dev-dependencies = [
     "dirty-equals>=0.6.0",
     "importlib-metadata>=6.7.0",
     "rich>=13.7.1",
-    "nest_asyncio==1.6.0",
     "pytest-xdist>=3.6.1",
     "debugpy>=1.8.15",
 ]
diff --git a/src/agentex/_version.py b/src/agentex/_version.py
index 41412071..4b363ffb 100644
--- a/src/agentex/_version.py
+++ b/src/agentex/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "agentex"
-__version__ = "0.4.16"  # x-release-please-version
+__version__ = "0.4.17"  # x-release-please-version
diff --git a/src/agentex/lib/core/services/adk/providers/openai.py b/src/agentex/lib/core/services/adk/providers/openai.py
index 31631a94..23d482c0 100644
--- a/src/agentex/lib/core/services/adk/providers/openai.py
+++ b/src/agentex/lib/core/services/adk/providers/openai.py
@@ -358,7 +358,6 @@ async def run_agent_auto_send(
             },
         ) as span:
             heartbeat_if_in_workflow("run agent auto send")
-
             async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
                 tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
                 handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
@@ -396,12 +395,9 @@ async def run_agent_auto_send(
                     result = await Runner.run(
                         starting_agent=agent, input=input_list, previous_response_id=previous_response_id
                     )
-                else:
-                    result = await Runner.run(starting_agent=agent, input=input_list)
-
-                if span:
-                    span.output = {
-                        "new_items": [
+                            item.raw_item.model_dump()
+                            if isinstance(item.raw_item, BaseModel)
+                            else item.raw_item
                             item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
                             for item in result.new_items
                         ],
@@ -431,7 +427,6 @@ async def run_agent_auto_send(
 
                 elif item.type == "tool_call_item":
                     tool_call_item = item.raw_item
-
                     # Extract tool call information using the helper method
                     call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                     tool_call_map[call_id] = tool_call_item
@@ -557,9 +552,15 @@ async def run_agent_streamed(
         ) as span:
             heartbeat_if_in_workflow("run agent streamed")
 
-            async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
+            async with mcp_server_context(
+                mcp_server_params, mcp_timeout_seconds
+            ) as servers:
                 tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
-                handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
+                handoffs = (
+                    [Agent(**handoff.model_dump()) for handoff in handoffs]
+                    if handoffs
+                    else []
+                )
                 agent_kwargs = {
                     "name": agent_name,
                     "instructions": agent_instructions,
@@ -572,7 +573,9 @@ async def run_agent_streamed(
                     "tool_use_behavior": tool_use_behavior,
                 }
                 if model_settings is not None:
-                    agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
+                    agent_kwargs["model_settings"] = (
+                        model_settings.to_oai_model_settings()
+                    )
                 if input_guardrails is not None:
                     agent_kwargs["input_guardrails"] = input_guardrails
                 if output_guardrails is not None:
                     agent_kwargs["output_guardrails"] = output_guardrails
@@ -600,7 +603,9 @@ async def run_agent_streamed(
                 if span:
                     span.output = {
                         "new_items": [
-                            item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
+                            item.raw_item.model_dump()
+                            if isinstance(item.raw_item, BaseModel)
+                            else item.raw_item
                             for item in result.new_items
                         ],
                         "final_output": result.final_output,
@@ -733,7 +738,6 @@ async def run_agent_streamed_auto_send(
                     if event.type == "run_item_stream_event":
                         if event.item.type == "tool_call_item":
                             tool_call_item = event.item.raw_item
-
                             # Extract tool call information using the helper method
                             call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
                             tool_call_map[call_id] = tool_call_item
@@ -746,10 +750,12 @@ async def run_agent_streamed_auto_send(
                             )
 
                             # Create tool request using streaming context (immediate completion)
-                            async with self.streaming_service.streaming_task_message_context(
-                                task_id=task_id,
-                                initial_content=tool_request_content,
-                            ) as streaming_context:
+                            async with (
+                                self.streaming_service.streaming_task_message_context(
+                                    task_id=task_id,
+                                    initial_content=tool_request_content,
+                                ) as streaming_context
+                            ):
                                 # The message has already been persisted, but we still need to send an update
                                 await streaming_context.stream_update(
                                     update=StreamTaskMessageFull(
@@ -775,9 +781,12 @@ async def run_agent_streamed_auto_send(
                             )
 
                             # Create tool response using streaming context (immediate completion)
-                            async with self.streaming_service.streaming_task_message_context(
-                                task_id=task_id, initial_content=tool_response_content
-                            ) as streaming_context:
+                            async with (
+                                self.streaming_service.streaming_task_message_context(
+                                    task_id=task_id,
+                                    initial_content=tool_response_content
+                                ) as streaming_context
+                            ):
                                 # The message has already been persisted, but we still need to send an update
                                 await streaming_context.stream_update(
                                     update=StreamTaskMessageFull(
@@ -803,10 +812,14 @@ async def run_agent_streamed_auto_send(
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[item_id] = await streaming_context.open()
+                                item_id_to_streaming_context[
+                                    item_id
+                                ] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[item_id]
+                                streaming_context = item_id_to_streaming_context[
+                                    item_id
+                                ]
 
                                 # Stream the delta through the streaming service
                                 await streaming_context.stream_update(
@@ -836,10 +849,14 @@ async def run_agent_streamed_auto_send(
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[item_id] = await streaming_context.open()
+                                item_id_to_streaming_context[
+                                    item_id
+                                ] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[item_id]
+                                streaming_context = item_id_to_streaming_context[
+                                    item_id
+                                ]
 
                                 # Stream the summary delta through the streaming service
                                 await streaming_context.stream_update(
@@ -873,10 +890,14 @@ async def run_agent_streamed_auto_send(
                                     ),
                                 )
                                 # Open the streaming context
-                                item_id_to_streaming_context[item_id] = await streaming_context.open()
+                                item_id_to_streaming_context[
+                                    item_id
+                                ] = await streaming_context.open()
                                 unclosed_item_ids.add(item_id)
                             else:
-                                streaming_context = item_id_to_streaming_context[item_id]
+                                streaming_context = item_id_to_streaming_context[
+                                    item_id
+                                ]
 
                                 # Stream the content delta through the streaming service
                                 await streaming_context.stream_update(
@@ -904,7 +925,6 @@ async def run_agent_streamed_auto_send(
                             # to close the streaming context, but they do!!!
                             # They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent
                            # I have no idea why they do this.
-
                         elif isinstance(event.data, ResponseReasoningTextDoneEvent):
                             # Handle reasoning content text completion
                             item_id = event.data.item_id
@@ -920,7 +940,9 @@ async def run_agent_streamed_auto_send(
 
                             # Finish the streaming context (sends DONE event and updates message)
                             if item_id in item_id_to_streaming_context:
-                                streaming_context = item_id_to_streaming_context[item_id]
+                                streaming_context = item_id_to_streaming_context[
+                                    item_id
+                                ]
                                 await streaming_context.close()
                                 if item_id in unclosed_item_ids:
                                     unclosed_item_ids.remove(item_id)
@@ -930,17 +952,17 @@ async def run_agent_streamed_auto_send(
 
                     # Create a copy to avoid modifying set during iteration
                     remaining_items = list(unclosed_item_ids)
                     for item_id in remaining_items:
-                        if (
-                            item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
-                        ):  # Check if still unclosed
-                            streaming_context = item_id_to_streaming_context[item_id]
+                        if (item_id in unclosed_item_ids and
+                            item_id in item_id_to_streaming_context):  # Check if still unclosed
+                            streaming_context = item_id_to_streaming_context[
+                                item_id
+                            ]
                             await streaming_context.close()
                             unclosed_item_ids.discard(item_id)
 
             except InputGuardrailTripwireTriggered as e:
                 # Handle guardrail trigger by sending a rejection message
                 rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
-
                 # Try to extract rejection message from the guardrail result
                 if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
                     output_info = getattr(e.guardrail_result.output, "output_info", {})
@@ -971,7 +993,6 @@ async def run_agent_streamed_auto_send(
                                 type="full",
                             ),
                         )
-
                 # Re-raise to let the activity handle it
                 raise
 
@@ -1009,7 +1030,6 @@ async def run_agent_streamed_auto_send(
                                 type="full",
                             ),
                         )
-
                 # Re-raise to let the activity handle it
                 raise
 
diff --git a/src/agentex/lib/sdk/fastacp/fastacp.py b/src/agentex/lib/sdk/fastacp/fastacp.py
index 2b657ad7..fc2e8cb4 100644
--- a/src/agentex/lib/sdk/fastacp/fastacp.py
+++ b/src/agentex/lib/sdk/fastacp/fastacp.py
@@ -23,6 +23,7 @@
 
 logger = make_logger(__name__)
 
+
 class FastACP:
     """Factory for creating FastACP instances
 
diff --git a/tests/test_client.py b/tests/test_client.py
index da5399da..32b286d7 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -6,13 +6,10 @@
 import os
 import sys
 import json
-import time
 import asyncio
 import inspect
-import subprocess
 import tracemalloc
 from typing import Any, Union, cast
-from textwrap import dedent
 from unittest import mock
 from typing_extensions import Literal
 
@@ -23,14 +20,17 @@
 
 from agentex import Agentex, AsyncAgentex, APIResponseValidationError
 from agentex._types import Omit
+from agentex._utils import asyncify
 from agentex._models import BaseModel, FinalRequestOptions
 from agentex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
 from agentex._base_client import (
     DEFAULT_TIMEOUT,
     HTTPX_DEFAULT_TIMEOUT,
     BaseClient,
+    OtherPlatform,
     DefaultHttpxClient,
     DefaultAsyncHttpxClient,
+    get_platform,
     make_request_options,
 )
 
@@ -1643,50 +1643,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
 
         assert response.http_request.headers.get("x-stainless-retry-count") == "42"
 
-    def test_get_platform(self) -> None:
-        # A previous implementation of asyncify could leave threads unterminated when
-        # used with nest_asyncio.
-        #
-        # Since nest_asyncio.apply() is global and cannot be un-applied, this
-        # test is run in a separate process to avoid affecting other tests.
-        test_code = dedent("""
-        import asyncio
-        import nest_asyncio
-        import threading
-
-        from agentex._utils import asyncify
-        from agentex._base_client import get_platform
-
-        async def test_main() -> None:
-            result = await asyncify(get_platform)()
-            print(result)
-            for thread in threading.enumerate():
-                print(thread.name)
-
-        nest_asyncio.apply()
-        asyncio.run(test_main())
-        """)
-        with subprocess.Popen(
-            [sys.executable, "-c", test_code],
-            text=True,
-        ) as process:
-            timeout = 10  # seconds
-
-            start_time = time.monotonic()
-            while True:
-                return_code = process.poll()
-                if return_code is not None:
-                    if return_code != 0:
-                        raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
-
-                    # success
-                    break
-
-                if time.monotonic() - start_time > timeout:
-                    process.kill()
-                    raise AssertionError("calling get_platform using asyncify resulted in a hung process")
-
-                time.sleep(0.1)
+    async def test_get_platform(self) -> None:
+        platform = await asyncify(get_platform)()
+        assert isinstance(platform, (str, OtherPlatform))
 
     async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
         # Test that the proxy environment variables are set correctly
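
A note on the test_get_platform change above: with nest_asyncio dropped from the dev
dependencies, the subprocess-based regression test is no longer needed, and the test can
await asyncify(get_platform)() directly on its own event loop. The sketch below is
illustrative only, not part of the diff; it assumes asyncify wraps a synchronous callable
into an awaitable that runs it without blocking the event loop, which is what the new
test asserts:

    import asyncio

    from agentex._utils import asyncify
    from agentex._base_client import OtherPlatform, get_platform

    async def main() -> None:
        # get_platform() is synchronous; asyncify(...) returns an async wrapper
        # that runs it off the event loop and awaits its result.
        platform = await asyncify(get_platform)()
        # The platform is reported either as a known string or an OtherPlatform value.
        assert isinstance(platform, (str, OtherPlatform))
        print(platform)

    asyncio.run(main())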