From e6344888335da039bd66b24e9d35f1e548f281fc Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Tue, 7 Apr 2026 20:44:30 -0700 Subject: [PATCH 01/13] Experiment --- python/.cspell.json | 1 + .../core/agent_framework/foundry/__init__.py | 2 + .../core/agent_framework/foundry/__init__.pyi | 2 + python/packages/core/pyproject.toml | 1 + python/packages/foundry_hosting/LICENSE | 21 ++ python/packages/foundry_hosting/README.md | 0 .../__init__.py | 12 ++ .../_responses.py | 109 +++++++++++ .../_shared.py | 181 ++++++++++++++++++ .../packages/foundry_hosting/pyproject.toml | 99 ++++++++++ python/pyproject.toml | 9 + .../foundry_vnext/helloworld/.dockerignore | 6 + .../foundry_vnext/helloworld/Dockerfile | 16 ++ .../helloworld/agent.manifest.yaml | 16 ++ .../foundry_vnext/helloworld/agent.yaml | 8 + .../foundry_vnext/helloworld/main.py | 33 ++++ .../foundry_vnext/helloworld/requirements.txt | 2 + python/uv.lock | 123 +++++++++++- 18 files changed, 637 insertions(+), 4 deletions(-) create mode 100644 python/packages/foundry_hosting/LICENSE create mode 100644 python/packages/foundry_hosting/README.md create mode 100644 python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py create mode 100644 python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py create mode 100644 python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py create mode 100644 python/packages/foundry_hosting/pyproject.toml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py create mode 100644 
python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/requirements.txt diff --git a/python/.cspell.json b/python/.cspell.json index a26cc7fed7..c5bf954d2b 100644 --- a/python/.cspell.json +++ b/python/.cspell.json @@ -24,6 +24,7 @@ ], "words": [ "aeiou", + "agentserver", "agui", "aiplatform", "azuredocindex", diff --git a/python/packages/core/agent_framework/foundry/__init__.py b/python/packages/core/agent_framework/foundry/__init__.py index b1d2b88450..8edaed5485 100644 --- a/python/packages/core/agent_framework/foundry/__init__.py +++ b/python/packages/core/agent_framework/foundry/__init__.py @@ -6,6 +6,7 @@ - ``agent-framework-anthropic`` - ``agent-framework-foundry`` - ``agent-framework-foundry-local`` +- ``agent-framework-foundry-hosting`` """ import importlib @@ -31,6 +32,7 @@ "RawFoundryEmbeddingClient": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_foundry_target": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_traces": ("agent_framework_foundry", "agent-framework-foundry"), + "ResponsesHost": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), } diff --git a/python/packages/core/agent_framework/foundry/__init__.pyi b/python/packages/core/agent_framework/foundry/__init__.pyi index 47eb92b3af..133378ea2a 100644 --- a/python/packages/core/agent_framework/foundry/__init__.pyi +++ b/python/packages/core/agent_framework/foundry/__init__.pyi @@ -20,6 +20,7 @@ from agent_framework_foundry import ( evaluate_foundry_target, evaluate_traces, ) +from agent_framework_foundry_hosting import ResponsesHost from agent_framework_foundry_local import ( FoundryLocalChatOptions, FoundryLocalClient, @@ -44,6 +45,7 @@ __all__ = [ "RawFoundryAgentChatClient", "RawFoundryChatClient", "RawFoundryEmbeddingClient", + "ResponsesHost", "evaluate_foundry_target", "evaluate_traces", ] diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index e4aa1f5e40..c6e7ef2b42 100644 --- 
a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -48,6 +48,7 @@ all = [ "agent-framework-durabletask", "agent-framework-foundry", "agent-framework-foundry-local", + "agent-framework-foundry-hosting", "agent-framework-github-copilot; python_version >= '3.11'", "agent-framework-lab", "agent-framework-mem0", diff --git a/python/packages/foundry_hosting/LICENSE b/python/packages/foundry_hosting/LICENSE new file mode 100644 index 0000000000..9e841e7a26 --- /dev/null +++ b/python/packages/foundry_hosting/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
# Copyright (c) Microsoft. All rights reserved.
"""Foundry hosting integration for the Microsoft Agent Framework."""

from importlib.metadata import PackageNotFoundError, version

from ._responses import ResponsesHost

__all__ = ["ResponsesHost"]

try:
    # Resolve the installed distribution's version; fall back when the
    # package is imported from a source tree without being installed.
    __version__ = version(__name__)
except PackageNotFoundError:
    __version__ = "0.0.0"
# Copyright (c) Microsoft. All rights reserved.

import asyncio
from collections.abc import AsyncIterable

from agent_framework import Agent, HistoryProvider, Message
from azure.ai.agentserver.core import AgentHost
from azure.ai.agentserver.responses import ResponseContext, ResponseEventStream
from azure.ai.agentserver.responses.hosting import ResponseHandler
from azure.ai.agentserver.responses.models import CreateResponse, get_input_text
from typing_extensions import Any, Sequence

from ._shared import extract_chat_options, to_messages


class ResponsesHostContextProvider(HistoryProvider):
    """History provider backed by the hosting layer's per-request context.

    Loads prior conversation items from the Responses protocol context so the
    agent sees the full history. Persistence is owned by the hosting layer,
    so ``save_messages`` is intentionally a no-op.
    """

    def __init__(self, context: ResponseContext):
        super().__init__("responses-host", load_messages=True)
        self.context = context

    async def get_messages(
        self,
        session_id: str | None,
        *,
        state: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> list[Message]:
        """Return the request's conversation history as framework messages."""
        history = await self.context.get_history_async()
        return to_messages(history)

    async def save_messages(
        self,
        session_id: str | None,
        messages: Sequence[Message],
        *,
        state: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """No-op: the hosting layer persists history itself."""


class ResponsesHost(AgentHost):
    """Hosts an Agent Framework ``Agent`` behind the Responses protocol."""

    def __init__(self, agent: Agent, **kwargs: Any) -> None:
        """Create a host that serves ``agent``.

        Keyword Args:
            application_insights_connection_string: Optional telemetry target.
            graceful_shutdown_timeout: Optional shutdown grace period.
            log_level: Optional server log level.

        Note:
            Any other keyword arguments are silently ignored (matches prior
            behavior of this constructor).
        """
        application_insights_connection_string = kwargs.pop("application_insights_connection_string", None)
        graceful_shutdown_timeout = kwargs.pop("graceful_shutdown_timeout", None)
        log_level = kwargs.pop("log_level", None)
        super().__init__(
            application_insights_connection_string=application_insights_connection_string,
            graceful_shutdown_timeout=graceful_shutdown_timeout,
            log_level=log_level,
        )

        self.agent = agent
        self.response_handler = ResponseHandler(self)
        self.response_handler.create_handler(self._handle_create)  # type: ignore

    def _install_context_provider(self, context: ResponseContext) -> None:
        """Replace (or append) the agent's loading history provider with one bound to ``context``.

        At most one ``HistoryProvider`` with ``load_messages=True`` may exist
        on the agent; otherwise a ``RuntimeError`` is raised.

        NOTE(review): this mutates the shared ``self.agent`` per request, so
        concurrent requests could overwrite each other's provider — confirm the
        host serializes request handling, or pass the provider per-run instead.
        """
        loading_idx = [
            i
            for i, existing in enumerate(self.agent.context_providers)
            if isinstance(existing, HistoryProvider) and existing.load_messages
        ]
        if len(loading_idx) > 1:
            # There shouldn't be more than one history provider with `load_messages=True`
            raise RuntimeError("There shouldn't be more than one history provider with `load_messages=True`")

        provider = ResponsesHostContextProvider(context)
        if loading_idx:
            self.agent.context_providers[loading_idx[0]] = provider
        else:
            self.agent.context_providers.append(provider)

    async def _handle_create(
        self,
        request: CreateResponse,
        context: ResponseContext,
        cancellation_signal: asyncio.Event,
    ) -> AsyncIterable[dict[str, Any]]:
        """Stream the Responses-protocol event sequence for one create request.

        Emits created/in-progress events, opens a single assistant message with
        one text part, relays the agent's streamed text deltas, then closes the
        part, the item, and the response in order.
        """
        self._install_context_provider(context)

        stream = ResponseEventStream(
            response_id=context.response_id,
            model=getattr(request, "model", None),
        )

        yield stream.emit_created()
        yield stream.emit_in_progress()

        # `get_input_text` flattens the request input into plain text.
        input_text = get_input_text(request)

        # Open one assistant message with a single text content part.
        message_item = stream.add_output_item_message()
        yield message_item.emit_added()
        text_content = message_item.add_text_content()
        yield text_content.emit_added()

        # Invoke the MAF agent and relay streamed text deltas.
        chat_options = extract_chat_options(request)
        full_text = ""
        async for update in self.agent.run(
            input_text,
            options=chat_options,
            stream=True,
        ):
            full_text += update.text
            yield text_content.emit_delta(update.text)

        # Close the content part, the message item, then the response.
        yield text_content.emit_done(full_text)
        yield message_item.emit_content_done(text_content)
        yield message_item.emit_done()

        yield stream.emit_completed()
# Copyright (c) Microsoft. All rights reserved.

from agent_framework import ChatOptions, Content, Message
from azure.ai.agentserver.responses.models import (
    ComputerScreenshotContent,
    CreateResponse,
    MessageContent,
    MessageContentInputFileContent,
    MessageContentInputImageContent,
    MessageContentInputTextContent,
    MessageContentOutputTextContent,
    MessageContentReasoningTextContent,
    MessageContentRefusalContent,
    OutputItem,
    OutputItemFunctionToolCall,
    OutputItemFunctionToolCallOutput,
    OutputItemMessage,
    OutputItemOutputMessage,
    OutputItemReasoningItem,
    OutputMessageContent,
    OutputMessageContentOutputTextContent,
    OutputMessageContentRefusalContent,
    SummaryTextContent,
    TextContent,
)
from typing_extensions import Sequence, cast


def extract_chat_options(request: CreateResponse) -> ChatOptions:
    """Extracts chat options from a CreateResponse request.

    Only options explicitly present on the request are copied; absent values
    are left unset so downstream defaults apply.

    Args:
        request (CreateResponse): The CreateResponse request object containing the chat options.

    Returns:
        ChatOptions: The extracted chat options.
    """
    options = ChatOptions()

    if request.temperature is not None:
        options["temperature"] = request.temperature
    if request.top_p is not None:
        options["top_p"] = request.top_p
    if request.max_output_tokens is not None:
        options["max_tokens"] = request.max_output_tokens

    return options


def to_messages(history: Sequence[OutputItem]) -> list[Message]:
    """Converts a sequence of OutputItem objects to a list of Message objects.

    Args:
        history (Sequence[OutputItem]): The sequence of OutputItem objects to convert.

    Returns:
        list[Message]: The list of Message objects.
    """
    return [_to_message(item) for item in history]


def _to_message(item: OutputItem) -> Message:
    """Converts an OutputItem to a Message.

    Args:
        item (OutputItem): The OutputItem to convert.

    Returns:
        Message: The converted Message.

    Raises:
        ValueError: If the OutputItem type is not supported.
    """
    if item.type == "output_message":
        msg = cast(OutputItemOutputMessage, item)
        contents = [_convert_output_message_content(part) for part in msg.content]
        return Message(role=msg.role, contents=contents)

    if item.type == "message":
        msg = cast(OutputItemMessage, item)
        contents = [_convert_message_content(part) for part in msg.content]
        return Message(role=msg.role, contents=contents)

    if item.type == "function_call":
        fc = cast(OutputItemFunctionToolCall, item)
        return Message(
            role="assistant",
            contents=[Content.from_function_call(fc.call_id, fc.name, arguments=fc.arguments)],
        )

    if item.type == "function_call_output":
        fco = cast(OutputItemFunctionToolCallOutput, item)
        # Tool output may be structured; stringify anything that isn't text.
        output = fco.output if isinstance(fco.output, str) else str(fco.output)
        return Message(
            role="tool",
            contents=[Content.from_function_result(fco.call_id, result=output)],
        )

    if item.type == "reasoning":
        reasoning = cast(OutputItemReasoningItem, item)
        # Items without summaries yield a message with empty contents.
        summaries = reasoning.summary or []
        return Message(
            role="assistant",
            contents=[Content.from_text(summary.text) for summary in summaries],
        )

    raise ValueError(f"Unsupported OutputItem type: {item.type}")


def _convert_output_message_content(content: OutputMessageContent) -> Content:
    """Converts an OutputMessageContent to a Content object.

    Args:
        content (OutputMessageContent): The OutputMessageContent to convert.

    Returns:
        Content: The converted Content object.

    Raises:
        ValueError: If the OutputMessageContent type is not supported.
    """
    if content.type == "output_text":
        text_content = cast(OutputMessageContentOutputTextContent, content)
        return Content.from_text(text_content.text)
    if content.type == "refusal":
        refusal_content = cast(OutputMessageContentRefusalContent, content)
        return Content.from_text(refusal_content.refusal)

    raise ValueError(f"Unsupported OutputMessageContent type: {content.type}")


def _convert_message_content(content: MessageContent) -> Content:
    """Converts a MessageContent to a Content object.

    Args:
        content (MessageContent): The MessageContent to convert.

    Returns:
        Content: The converted Content object.

    Raises:
        ValueError: If the MessageContent type is not supported, or if an
            image/file part carries neither a URL nor a file id.
    """
    if content.type == "input_text":
        input_text = cast(MessageContentInputTextContent, content)
        return Content.from_text(input_text.text)
    if content.type == "output_text":
        output_text = cast(MessageContentOutputTextContent, content)
        return Content.from_text(output_text.text)
    if content.type == "text":
        text = cast(TextContent, content)
        return Content.from_text(text.text)
    if content.type == "summary_text":
        summary = cast(SummaryTextContent, content)
        return Content.from_text(summary.text)
    if content.type == "refusal":
        refusal = cast(MessageContentRefusalContent, content)
        return Content.from_text(refusal.refusal)
    if content.type == "reasoning_text":
        reasoning = cast(MessageContentReasoningTextContent, content)
        return Content.from_text_reasoning(text=reasoning.text)
    if content.type == "input_image":
        image = cast(MessageContentInputImageContent, content)
        if image.image_url:
            return Content.from_uri(image.image_url)
        if image.file_id:
            return Content.from_hosted_file(image.file_id)
        # Previously fell through to the generic "Unsupported MessageContent
        # type" error below, which misreported a payload problem as a type
        # problem. Raise a specific error instead.
        raise ValueError("input_image content has neither image_url nor file_id")
    if content.type == "input_file":
        file = cast(MessageContentInputFileContent, content)
        if file.file_url:
            return Content.from_uri(file.file_url)
        if file.file_id:
            return Content.from_hosted_file(file.file_id, name=file.filename)
        raise ValueError("input_file content has neither file_url nor file_id")
    if content.type == "computer_screenshot":
        screenshot = cast(ComputerScreenshotContent, content)
        return Content.from_uri(screenshot.image_url)

    raise ValueError(f"Unsupported MessageContent type: {content.type}")
fEX" +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = [] +timeout = 120 +markers = [ + "integration: marks tests as integration tests that require external services", +] + +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.coverage.run] +omit = [ + "**/__init__.py" +] + +[tool.pyright] +extends = "../../pyproject.toml" +include = ["agent_framework_foundry_hosting"] +exclude = ['tests'] + +[tool.mypy] +plugins = ['pydantic.mypy'] +strict = true +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true +no_implicit_optional = true +check_untyped_defs = true +warn_return_any = true +show_error_codes = true +warn_unused_ignores = false +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +[tool.bandit] +targets = ["agent_framework_foundry_hosting"] +exclude_dirs = ["tests"] + +[tool.poe] +executor.type = "uv" +include = "../../shared_tasks.toml" + +[tool.poe.tasks.mypy] +help = "Run MyPy for this package." +cmd = "mypy --config-file $POE_ROOT/pyproject.toml agent_framework_foundry_hosting" + +[tool.poe.tasks.test] +help = "Run the default unit test suite for this package." 
+cmd = 'pytest -m "not integration" --cov=agent_framework_foundry_hosting --cov-report=term-missing:skip-covered tests' + +[build-system] +requires = ["flit-core >= 3.11,<4.0"] +build-backend = "flit_core.buildapi" \ No newline at end of file diff --git a/python/pyproject.toml b/python/pyproject.toml index 24af13b940..c2b692e23c 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -78,6 +78,7 @@ agent-framework-declarative = { workspace = true } agent-framework-devui = { workspace = true } agent-framework-durabletask = { workspace = true } agent-framework-foundry = { workspace = true } +agent-framework-foundry-hosting = { workspace = true } agent-framework-foundry-local = { workspace = true } agent-framework-lab = { workspace = true } agent-framework-mem0 = { workspace = true } @@ -88,6 +89,9 @@ agent-framework-redis = { workspace = true } agent-framework-github-copilot = { workspace = true } agent-framework-claude = { workspace = true } agent-framework-orchestrations = { workspace = true } +azure-ai-agentserver-responses = { index = "azure-sdk-feed" } +azure-ai-agentserver-invocations = { index = "azure-sdk-feed" } +azure-ai-agentserver-core = { index = "azure-sdk-feed" } litellm = { url = "https://files.pythonhosted.org/packages/57/77/0c6eca2cb049793ddf8ce9cdcd5123a35666c4962514788c4fc90edf1d3b/litellm-1.82.1-py3-none-any.whl" } [tool.ruff] @@ -418,6 +422,11 @@ url = "https://test.pypi.org/simple/" publish-url = "https://test.pypi.org/legacy/" explicit = true +[[tool.uv.index]] +name = "azure-sdk-feed" +url = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" +explicit = true + [tool.flit.module] name = "agent_framework_meta" diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml new file mode 100644 index 0000000000..ad074d6b89 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml @@ -0,0 +1,16 @@ +name: echo-agent-responses-streaming +description: > + A simple echo agent that streams responses word-by-word using the + azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). 
# Copyright (c) Microsoft. All rights reserved.

"""Hello-world sample: serve a Foundry-backed agent with ResponsesHost."""

import os

from agent_framework import Agent
from agent_framework.foundry import FoundryChatClient, ResponsesHost
from azure.identity import AzureCliCredential
from dotenv import load_dotenv

# Pull FOUNDRY_* settings from a local .env file when present.
load_dotenv()


def main():
    """Build the chat client and agent, then host them over the Responses protocol."""
    chat_client = FoundryChatClient(
        project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"],
        model=os.environ["FOUNDRY_MODEL"],
        credential=AzureCliCredential(),
    )

    hello_agent = Agent(
        client=chat_client,
        name="HelloAgent",
        instructions="You are a friendly assistant. Keep your answers brief.",
    )

    ResponsesHost(hello_agent).run()


if __name__ == "__main__":
    main()
requires-dist = [ { name = "agent-framework-devui", marker = "extra == 'all'", editable = "packages/devui" }, { name = "agent-framework-durabletask", marker = "extra == 'all'", editable = "packages/durabletask" }, { name = "agent-framework-foundry", marker = "extra == 'all'", editable = "packages/foundry" }, + { name = "agent-framework-foundry-hosting", marker = "extra == 'all'", editable = "packages/foundry_hosting" }, { name = "agent-framework-foundry-local", marker = "extra == 'all'", editable = "packages/foundry_local" }, { name = "agent-framework-github-copilot", marker = "python_full_version >= '3.11' and extra == 'all'", editable = "packages/github_copilot" }, { name = "agent-framework-lab", marker = "extra == 'all'", editable = "packages/lab" }, @@ -497,6 +500,25 @@ requires-dist = [ { name = "azure-ai-projects", specifier = ">=2.0.0,<3.0" }, ] +[[package]] +name = "agent-framework-foundry-hosting" +version = "1.0.0b260402" +source = { editable = "packages/foundry_hosting" } +dependencies = [ + { name = "agent-framework-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-invocations", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-ai-agentserver-responses", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] + +[package.metadata] +requires-dist = [ + { name = "agent-framework-core", editable = "packages/core" }, + { name = "azure-ai-agentserver-core", specifier = "==2.0.0a20260331006", index = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, + { name = "azure-ai-agentserver-invocations", specifier = "==1.0.0a20260331006", index = 
"https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, + { name = "azure-ai-agentserver-responses", specifier = "==1.0.0a20260331006", index = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, +] + [[package]] name = "agent-framework-foundry-local" version = "1.0.0b260402" @@ -996,6 +1018,46 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/64/b4/17d4b0b2a2dc85a6df63d1157e028ed19f90d4cd97c36717afef2bc2f395/attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309", size = 67548, upload-time = "2026-03-19T14:22:23.645Z" }, ] +[[package]] +name = "azure-ai-agentserver-core" +version = "2.0.0a20260331006" +source = { registry = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +dependencies = [ + { name = "hypercorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-core/2a20260331006/azure_ai_agentserver_core-2.0.0a20260331006.tar.gz", hash = "sha256:97ec317f1c1266eb60d82ae02827220b2453fcabf2d7698c0ffefac8ade20a2f" } +wheels = [ + { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-core/2a20260331006/azure_ai_agentserver_core-2.0.0a20260331006-py3-none-any.whl", hash = "sha256:f475bf8101a41eef9d3189b3376842ece9add6277394fdbe78e74e5960c80c34" }, +] + +[[package]] +name = "azure-ai-agentserver-invocations" +version = "1.0.0a20260331006" +source = { registry = 
"https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +dependencies = [ + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-invocations/1a20260331006/azure_ai_agentserver_invocations-1.0.0a20260331006.tar.gz", hash = "sha256:dd4d511a24388c79b937f960590569495076b889272b780746ef38c267e4f645" } +wheels = [ + { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-invocations/1a20260331006/azure_ai_agentserver_invocations-1.0.0a20260331006-py3-none-any.whl", hash = "sha256:615b49b73c448ea6c6c5f26e8be719d78e50a8f432fc89d4cff27397bd0297f0" }, +] + +[[package]] +name = "azure-ai-agentserver-responses" +version = "1.0.0a20260331006" +source = { registry = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +dependencies = [ + { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-responses/1a20260331006/azure_ai_agentserver_responses-1.0.0a20260331006.tar.gz", hash = "sha256:86f390c622697b5977d090cae182c929d4221abd5ddaf4838f9e0d3e0b4a2b2b" } 
+wheels = [ + { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-responses/1a20260331006/azure_ai_agentserver_responses-1.0.0a20260331006-py3-none-any.whl", hash = "sha256:ce84c3d6ce7a68c8ffcaee37bef45dc43199a66aa50fb876efc52d45e6a92925" }, +] + [[package]] name = "azure-ai-inference" version = "1.0.0b9" @@ -1423,7 +1485,7 @@ name = "clr-loader" version = "0.2.10" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/18/24/c12faf3f61614b3131b5c98d3bf0d376b49c7feaa73edca559aeb2aee080/clr_loader-0.2.10.tar.gz", hash = "sha256:81f114afbc5005bafc5efe5af1341d400e22137e275b042a8979f3feb9fc9446", size = 83605, upload-time = "2026-01-03T23:13:06.984Z" } wheels = [ @@ -2668,6 +2730,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a9/ae/8a3a16ea4d202cb641b51d2681bdd3d482c1c592d7570b3fa264730829ce/huggingface_hub-1.8.0-py3-none-any.whl", hash = "sha256:d3eb5047bd4e33c987429de6020d4810d38a5bef95b3b40df9b17346b7f353f2", size = 625208, upload-time = "2026-03-25T16:01:26.603Z" }, ] +[[package]] +name = "hypercorn" +version = "0.18.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "h11", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "h2", marker 
= "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "priority", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "taskgroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "tomli", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "wsproto", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/44/01/39f41a014b83dd5c795217362f2ca9071cf243e6a75bdcd6cd5b944658cc/hypercorn-0.18.0.tar.gz", hash = "sha256:d63267548939c46b0247dc8e5b45a9947590e35e64ee73a23c074aa3cf88e9da", size = 68420, upload-time = "2025-11-08T13:54:04.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/35/850277d1b17b206bd10874c8a9a3f52e059452fb49bb0d22cbb908f6038b/hypercorn-0.18.0-py3-none-any.whl", hash = "sha256:225e268f2c1c2f28f6d8f6db8f40cb8c992963610c5725e13ccfcddccb24b1cd", size = 61640, upload-time = "2025-11-08T13:54:03.202Z" }, +] + [[package]] name = "hyperframe" version = "6.1.0" @@ -4673,8 +4754,8 @@ name = "powerfx" version = "0.0.34" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "cffi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, - { name = "pythonnet", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { 
name = "cffi", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, + { name = "pythonnet", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9f/fb/6c4bf87e0c74ca1c563921ce89ca1c5785b7576bca932f7255cdf81082a7/powerfx-0.0.34.tar.gz", hash = "sha256:956992e7afd272657ed16d80f4cad24ec95d9e4a79fb9dfa4a068a09e136af32", size = 3237555, upload-time = "2025-12-22T15:50:59.682Z" } wheels = [ @@ -4705,6 +4786,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/53/05/9cca1708bb8c65264124eb4b04251e0f65ce5bfc707080bb6b492d5a0df7/prek-0.3.8-py3-none-win_arm64.whl", hash = "sha256:a2614647aeafa817a5802ccb9561e92eedc20dcf840639a1b00826e2c2442515", size = 5190872, upload-time = "2026-03-23T08:23:29.463Z" }, ] +[[package]] +name = "priority" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/3c/eb7c35f4dcede96fca1842dac5f4f5d15511aa4b52f3a961219e68ae9204/priority-2.0.0.tar.gz", hash = "sha256:c965d54f1b8d0d0b19479db3924c7c36cf672dbf2aec92d43fbdaf4492ba18c0", size = 24792, upload-time = "2021-06-27T10:15:05.487Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/5f/82c8074f7e84978129347c2c6ec8b6c59f3584ff1a20bc3c940a3e061790/priority-2.0.0-py3-none-any.whl", hash = "sha256:6f8eefce5f3ad59baf2c080a664037bb4725cd0a790d53d59ab4059288faf6aa", size = 8946, upload-time = "2021-06-27T10:15:03.856Z" }, +] + [[package]] name = "propcache" version = "0.4.1" @@ -5341,7 +5431,7 @@ name = "pythonnet" version = "3.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "clr-loader", marker = "sys_platform == 'darwin' 
or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "clr-loader", marker = "(python_full_version < '3.14' and sys_platform == 'darwin') or (python_full_version < '3.14' and sys_platform == 'linux') or (python_full_version < '3.14' and sys_platform == 'win32')" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/d6/1afd75edd932306ae9bd2c2d961d603dc2b52fcec51b04afea464f1f6646/pythonnet-3.0.5.tar.gz", hash = "sha256:48e43ca463941b3608b32b4e236db92d8d40db4c58a75ace902985f76dac21cf", size = 239212, upload-time = "2024-12-13T08:30:44.393Z" } wheels = [ @@ -6351,6 +6441,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/55/db07de81b5c630da5cbf5c7df646580ca26dfaefa593667fc6f2fe016d2e/tabulate-0.10.0-py3-none-any.whl", hash = "sha256:f0b0622e567335c8fabaaa659f1b33bcb6ddfe2e496071b743aa113f8774f2d3", size = 39814, upload-time = "2026-03-04T18:55:31.284Z" }, ] +[[package]] +name = "taskgroup" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, + { name = "typing-extensions", marker = "(python_full_version < '3.11' and sys_platform == 'darwin') or (python_full_version < '3.11' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'win32')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f0/8d/e218e0160cc1b692e6e0e5ba34e8865dbb171efeb5fc9a704544b3020605/taskgroup-0.2.2.tar.gz", hash = "sha256:078483ac3e78f2e3f973e2edbf6941374fbea81b9c5d0a96f51d297717f4752d", size = 11504, upload-time = "2025-01-03T09:24:13.761Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b1/74babcc824a57904e919f3af16d86c08b524c0691504baf038ef2d7f655c/taskgroup-0.2.2-py2.py3-none-any.whl", hash = 
"sha256:e2c53121609f4ae97303e9ea1524304b4de6faf9eb2c9280c7f87976479a52fb", size = 14237, upload-time = "2025-01-03T09:24:11.41Z" }, +] + [[package]] name = "tau2" version = "0.0.1" @@ -7050,6 +7153,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/1f/f6/a933bd70f98e9cf3e08167fc5cd7aaaca49147e48411c0bd5ae701bb2194/wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22", size = 23591, upload-time = "2025-08-12T05:53:20.674Z" }, ] +[[package]] +name = "wsproto" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "h11", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/79/12135bdf8b9c9367b8701c2c19a14c913c120b882d50b014ca0d38083c2c/wsproto-1.3.2.tar.gz", hash = "sha256:b86885dcf294e15204919950f666e06ffc6c7c114ca900b060d6e16293528294", size = 50116, upload-time = "2025-11-20T18:18:01.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/f5/10b68b7b1544245097b2a1b8238f66f2fc6dcaeb24ba5d917f52bd2eed4f/wsproto-1.3.2-py3-none-any.whl", hash = "sha256:61eea322cdf56e8cc904bd3ad7573359a242ba65688716b0710a5eb12beab584", size = 24405, upload-time = "2025-11-20T18:18:00.454Z" }, +] + [[package]] name = "yarl" version = "1.23.0" From 951f61abb6ab8edbd6616f4065fda90bd99e52f5 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Wed, 8 Apr 2026 10:38:50 -0700 Subject: [PATCH 02/13] Update dependency and add non streaming --- .../core/agent_framework/foundry/__init__.py | 2 +- .../core/agent_framework/foundry/__init__.pyi | 4 +- .../__init__.py | 4 +- .../_responses.py | 75 ++++++++++++----- .../_shared.py | 4 +- .../packages/foundry_hosting/pyproject.toml | 6 +- python/pyproject.toml | 11 +-- .../foundry_vnext/helloworld/main.py | 5 +- python/uv.lock | 81 ++++++++++++++----- 9 files changed, 128 insertions(+), 64 deletions(-) diff --git 
a/python/packages/core/agent_framework/foundry/__init__.py b/python/packages/core/agent_framework/foundry/__init__.py index 8edaed5485..f438b2ab10 100644 --- a/python/packages/core/agent_framework/foundry/__init__.py +++ b/python/packages/core/agent_framework/foundry/__init__.py @@ -32,7 +32,7 @@ "RawFoundryEmbeddingClient": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_foundry_target": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_traces": ("agent_framework_foundry", "agent-framework-foundry"), - "ResponsesHost": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), + "ResponsesHostServer": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), } diff --git a/python/packages/core/agent_framework/foundry/__init__.pyi b/python/packages/core/agent_framework/foundry/__init__.pyi index 133378ea2a..2d2a5e2920 100644 --- a/python/packages/core/agent_framework/foundry/__init__.pyi +++ b/python/packages/core/agent_framework/foundry/__init__.pyi @@ -20,7 +20,7 @@ from agent_framework_foundry import ( evaluate_foundry_target, evaluate_traces, ) -from agent_framework_foundry_hosting import ResponsesHost +from agent_framework_foundry_hosting import ResponsesHostServer from agent_framework_foundry_local import ( FoundryLocalChatOptions, FoundryLocalClient, @@ -45,7 +45,7 @@ __all__ = [ "RawFoundryAgentChatClient", "RawFoundryChatClient", "RawFoundryEmbeddingClient", - "ResponsesHost", + "ResponsesHostServer", "evaluate_foundry_target", "evaluate_traces", ] diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py index 595bdc8e1b..bad5e56fe0 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py @@ -2,11 +2,11 @@ import importlib.metadata -from ._responses import ResponsesHost +from 
._responses import ResponsesHostServer try: __version__ = importlib.metadata.version(__name__) except importlib.metadata.PackageNotFoundError: __version__ = "0.0.0" -__all__ = ["ResponsesHost"] +__all__ = ["ResponsesHostServer"] diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index 6e3afff5bc..1b962b63e0 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -4,9 +4,13 @@ from collections.abc import AsyncIterable from agent_framework import Agent, HistoryProvider, Message -from azure.ai.agentserver.core import AgentHost -from azure.ai.agentserver.responses import ResponseContext, ResponseEventStream -from azure.ai.agentserver.responses.hosting import ResponseHandler +from azure.ai.agentserver.responses import ( + ResponseContext, + ResponseEventStream, + ResponseProviderProtocol, + ResponsesServerOptions, +) +from azure.ai.agentserver.responses.hosting import ResponsesAgentServerHost from azure.ai.agentserver.responses.models import CreateResponse, get_input_text from typing_extensions import Any, Sequence @@ -14,7 +18,14 @@ class ResponsesHostContextProvider(HistoryProvider): + """A history provider that retrieves messages from a ResponseContext.""" + def __init__(self, context: ResponseContext): + """Initialize a ResponsesHostContextProvider. + + Args: + context: The ResponseContext to retrieve messages from. 
+ """ super().__init__("responses-host", load_messages=True) self.context = context @@ -25,7 +36,7 @@ async def get_messages( state: dict[str, Any] | None = None, **kwargs: Any, ) -> list[Message]: - history = await self.context.get_history_async() + history = await self.context.get_history() return to_messages(history) async def save_messages( @@ -39,20 +50,36 @@ async def save_messages( pass -class ResponsesHost(AgentHost): - def __init__(self, agent: Agent, **kwargs: Any) -> None: - application_insights_connection_string = kwargs.pop("application_insights_connection_string", None) - graceful_shutdown_timeout = kwargs.pop("graceful_shutdown_timeout", None) - log_level = kwargs.pop("log_level", None) - super().__init__( - application_insights_connection_string=application_insights_connection_string, - graceful_shutdown_timeout=graceful_shutdown_timeout, - log_level=log_level, - ) +class ResponsesHostServer(ResponsesAgentServerHost): + """A responses server host for an agent.""" + + def __init__( + self, + agent: Agent, + *, + prefix: str = "", + options: ResponsesServerOptions | None = None, + provider: ResponseProviderProtocol | None = None, + **kwargs: Any, + ) -> None: + """Initialize a ResponsesHostServer. + + Args: + agent: The agent to handle responses for. + prefix: The URL prefix for the server. + options: Optional server options. + provider: Optional response provider. + **kwargs: Additional keyword arguments. + + Note: + If the agent has a history provider with `load_messages=True`, it will be + replaced with a `ResponsesHostContextProvider` that will retrieve history + from the hosting infrastructure. 
+ """ + super().__init__(prefix=prefix, options=options, provider=provider, **kwargs) self.agent = agent - self.response_handler = ResponseHandler(self) - self.response_handler.create_handler(self._handle_create) # type: ignore + self.create_handler(self._handle_create) # pyright: ignore[reportUnknownMemberType] async def _handle_create( self, @@ -74,17 +101,21 @@ async def _handle_create( else: self.agent.context_providers[history_provider_idx[0]] = ResponsesHostContextProvider(context) - stream = ResponseEventStream( - response_id=context.response_id, - model=getattr(request, "model", None), - ) + input_items = get_input_text(request) + + stream = ResponseEventStream(response_id=context.response_id, model=request.model) yield stream.emit_created() yield stream.emit_in_progress() - input_items = get_input_text(request) + if request.stream is None or request.stream is False: + # Run the agent in non-streaming mode + response = await self.agent.run(input_items, stream=False) + for item in stream.output_item_message(response.text): + yield item + yield stream.emit_completed() - # Start the response + # Start the streaming response message_item = stream.add_output_item_message() yield message_item.emit_added() text_content = message_item.add_text_content() diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py index 3cd64bed1d..ec6d732915 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py @@ -4,6 +4,7 @@ from azure.ai.agentserver.responses.models import ( ComputerScreenshotContent, CreateResponse, + FunctionCallOutputItemParam, MessageContent, MessageContentInputFileContent, MessageContentInputImageContent, @@ -13,7 +14,6 @@ MessageContentRefusalContent, OutputItem, OutputItemFunctionToolCall, - OutputItemFunctionToolCallOutput, 
OutputItemMessage, OutputItemOutputMessage, OutputItemReasoningItem, @@ -92,7 +92,7 @@ def _to_message(item: OutputItem) -> Message: ) if item.type == "function_call_output": - fco = cast(OutputItemFunctionToolCallOutput, item) + fco = cast(FunctionCallOutputItemParam, item) output = fco.output if isinstance(fco.output, str) else str(fco.output) return Message( role="tool", diff --git a/python/packages/foundry_hosting/pyproject.toml b/python/packages/foundry_hosting/pyproject.toml index bce8bbfd69..1f0c42dbbf 100644 --- a/python/packages/foundry_hosting/pyproject.toml +++ b/python/packages/foundry_hosting/pyproject.toml @@ -24,9 +24,9 @@ classifiers = [ ] dependencies = [ "agent-framework-core>=1.0.0,<2", - "azure-ai-agentserver-core==2.0.0a20260331006", - "azure-ai-agentserver-responses==1.0.0a20260331006", - "azure-ai-agentserver-invocations==1.0.0a20260331006" + "azure-ai-agentserver-core", + "azure-ai-agentserver-responses", + "azure-ai-agentserver-invocations" ] [tool.uv] diff --git a/python/pyproject.toml b/python/pyproject.toml index c2b692e23c..4accd6d888 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -89,9 +89,9 @@ agent-framework-redis = { workspace = true } agent-framework-github-copilot = { workspace = true } agent-framework-claude = { workspace = true } agent-framework-orchestrations = { workspace = true } -azure-ai-agentserver-responses = { index = "azure-sdk-feed" } -azure-ai-agentserver-invocations = { index = "azure-sdk-feed" } -azure-ai-agentserver-core = { index = "azure-sdk-feed" } +azure-ai-agentserver-responses = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-responses" } +azure-ai-agentserver-invocations = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-invocations" } +azure-ai-agentserver-core = { git = 
"https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-core" } litellm = { url = "https://files.pythonhosted.org/packages/57/77/0c6eca2cb049793ddf8ce9cdcd5123a35666c4962514788c4fc90edf1d3b/litellm-1.82.1-py3-none-any.whl" } [tool.ruff] @@ -422,11 +422,6 @@ url = "https://test.pypi.org/simple/" publish-url = "https://test.pypi.org/legacy/" explicit = true -[[tool.uv.index]] -name = "azure-sdk-feed" -url = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" -explicit = true - [tool.flit.module] name = "agent_framework_meta" diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py index f1e1972393..6b63158095 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py @@ -4,7 +4,8 @@ import os from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, ResponsesHost +from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider from azure.identity import AzureCliCredential from dotenv import load_dotenv @@ -25,7 +26,7 @@ def main(): instructions="You are a friendly assistant. 
Keep your answers brief.", ) - server = ResponsesHost(agent) + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) server.run() diff --git a/python/uv.lock b/python/uv.lock index a713525a20..b9cf525334 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -514,9 +514,9 @@ dependencies = [ [package.metadata] requires-dist = [ { name = "agent-framework-core", editable = "packages/core" }, - { name = "azure-ai-agentserver-core", specifier = "==2.0.0a20260331006", index = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, - { name = "azure-ai-agentserver-invocations", specifier = "==1.0.0a20260331006", index = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, - { name = "azure-ai-agentserver-responses", specifier = "==1.0.0a20260331006", index = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" }, + { name = "azure-ai-agentserver-core", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-core&branch=agentserver%2Fresponses" }, + { name = "azure-ai-agentserver-invocations", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-invocations&branch=agentserver%2Fresponses" }, + { name = "azure-ai-agentserver-responses", git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-responses&branch=agentserver%2Fresponses" }, ] [[package]] @@ -1020,43 +1020,34 @@ wheels = [ [[package]] name = "azure-ai-agentserver-core" -version = "2.0.0a20260331006" -source = { registry = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +version = "2.0.0b1" +source = { git = 
"https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-core&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } dependencies = [ + { name = "azure-monitor-opentelemetry-exporter", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "hypercorn", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-api", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-exporter-otlp-proto-grpc", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "starlette", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-core/2a20260331006/azure_ai_agentserver_core-2.0.0a20260331006.tar.gz", hash = "sha256:97ec317f1c1266eb60d82ae02827220b2453fcabf2d7698c0ffefac8ade20a2f" } -wheels = [ - { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-core/2a20260331006/azure_ai_agentserver_core-2.0.0a20260331006-py3-none-any.whl", hash = "sha256:f475bf8101a41eef9d3189b3376842ece9add6277394fdbe78e74e5960c80c34" }, -] [[package]] name = "azure-ai-agentserver-invocations" -version = "1.0.0a20260331006" -source = { registry = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +version = "1.0.0b1" +source = { git = 
"https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-invocations&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } dependencies = [ { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-invocations/1a20260331006/azure_ai_agentserver_invocations-1.0.0a20260331006.tar.gz", hash = "sha256:dd4d511a24388c79b937f960590569495076b889272b780746ef38c267e4f645" } -wheels = [ - { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-invocations/1a20260331006/azure_ai_agentserver_invocations-1.0.0a20260331006-py3-none-any.whl", hash = "sha256:615b49b73c448ea6c6c5f26e8be719d78e50a8f432fc89d4cff27397bd0297f0" }, -] [[package]] name = "azure-ai-agentserver-responses" -version = "1.0.0a20260331006" -source = { registry = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" } +version = "1.0.0b1" +source = { git = "https://github.com/Azure/azure-sdk-for-python.git?subdirectory=sdk%2Fagentserver%2Fazure-ai-agentserver-responses&branch=agentserver%2Fresponses#43579f686f51ebed23b066d06c90a544c0070a0b" } dependencies = [ { name = "azure-ai-agentserver-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, - { name = "httpx", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, { name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, ] -sdist = { url = 
"https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-responses/1a20260331006/azure_ai_agentserver_responses-1.0.0a20260331006.tar.gz", hash = "sha256:86f390c622697b5977d090cae182c929d4221abd5ddaf4838f9e0d3e0b4a2b2b" } -wheels = [ - { url = "https://pkgs.dev.azure.com/azure-sdk/29ec6040-b234-4e31-b139-33dc4287b756/_packaging/3572dbf9-b5ef-433b-9137-fc4d7768e7cc/pypi/download/azure-ai-agentserver-responses/1a20260331006/azure_ai_agentserver_responses-1.0.0a20260331006-py3-none-any.whl", hash = "sha256:ce84c3d6ce7a68c8ffcaee37bef45dc43199a66aa50fb876efc52d45e6a92925" }, -] [[package]] name = "azure-ai-inference" @@ -1170,6 +1161,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/49/9a/417b3a533e01953a7c618884df2cb05a71e7b68bdbce4fbdb62349d2a2e8/azure_identity-1.25.3-py3-none-any.whl", hash = "sha256:f4d0b956a8146f30333e071374171f3cfa7bdb8073adb8c3814b65567aa7447c", size = 192138, upload-time = "2026-03-13T01:12:22.951Z" }, ] +[[package]] +name = "azure-monitor-opentelemetry-exporter" +version = "1.0.0b51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "azure-identity", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "msrest", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-api", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "opentelemetry-sdk", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "psutil", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/bc/a4/a6cd2d389bc1009300bcd57c9e2ace4b7e7ae1e5dc0bda415ee803629cf2/azure_monitor_opentelemetry_exporter-1.0.0b51.tar.gz", hash = "sha256:a6171c34326bcd6216938bb40d715c15f1f22984ac1986fc97231336d8ac4c3c", size = 319837, upload-time = "2026-04-06T21:45:46.378Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/1a/6b0b7a6181b42709103a65a676c89fd5055cb1d1b281ebe10c49254a170f/azure_monitor_opentelemetry_exporter-1.0.0b51-py2.py3-none-any.whl", hash = "sha256:6572cac11f96e3b18ae1187cb35cf3b40d0004655dae8048896c41c765bea530", size = 242104, upload-time = "2026-04-06T21:45:47.856Z" }, +] + [[package]] name = "azure-search-documents" version = "11.7.0b2" @@ -3685,6 +3693,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/75/bd9b7bb966668920f06b200e84454c8f3566b102183bc55c5473d96cb2b9/msal_extensions-1.3.1-py3-none-any.whl", hash = "sha256:96d3de4d034504e969ac5e85bae8106c8373b5c6568e4c8fa7af2eca9dbe6bca", size = 20583, upload-time = "2025-03-14T23:51:03.016Z" }, ] +[[package]] +name = "msrest" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "certifi", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests-oauthlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/77/8397c8fb8fc257d8ea0fa66f8068e073278c65f05acb17dcb22a02bfdc42/msrest-0.7.1.zip", hash = "sha256:6e7661f46f3afd88b75667b7187a92829924446c7ea1d169be8c4bb7eeb788b9", size = 175332, 
upload-time = "2022-06-13T22:41:25.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/cf/f2966a2638144491f8696c27320d5219f48a072715075d168b31d3237720/msrest-0.7.1-py3-none-any.whl", hash = "sha256:21120a810e1233e5e6cc7fe40b474eeb4ec6f757a15d7cf86702c369f9567c32", size = 85384, upload-time = "2022-06-13T22:41:22.42Z" }, +] + [[package]] name = "multidict" version = "6.7.1" @@ -5734,6 +5758,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" }, ] +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, + { name = "requests", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + [[package]] name = "rich" version = "13.9.4" From 359a721cf60ffe8f2b993e36f43abfa18ca92617 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Wed, 8 Apr 2026 11:19:15 -0700 Subject: [PATCH 03/13] Add more samples --- .../_responses.py | 16 ++--- .../_shared.py | 24 +------ 
.../helloworld/.dockerignore | 0 .../helloworld/Dockerfile | 0 .../helloworld/agent.manifest.yaml | 0 .../helloworld/agent.yaml | 0 .../helloworld/main.py | 1 - .../helloworld/requirements.txt | 0 .../hosted_mcp_tools/.dockerignore | 6 ++ .../hosted_mcp_tools/Dockerfile | 16 +++++ .../hosted_mcp_tools/agent.manifest.yaml | 16 +++++ .../hosted_mcp_tools/agent.yaml | 8 +++ .../hosted_mcp_tools/main.py | 65 +++++++++++++++++++ .../hosted_mcp_tools/requirements.txt | 2 + .../local_tools/.dockerignore | 6 ++ .../local_tools/Dockerfile | 16 +++++ .../local_tools/agent.manifest.yaml | 16 +++++ .../local_tools/agent.yaml | 8 +++ .../local_tools/main.py | 46 +++++++++++++ .../local_tools/requirements.txt | 2 + .../workflows/.dockerignore | 6 ++ .../workflows/Dockerfile | 16 +++++ .../workflows/agent.manifest.yaml | 16 +++++ .../workflows/agent.yaml | 8 +++ .../foundry_vnext_responses/workflows/main.py | 49 ++++++++++++++ .../workflows/requirements.txt | 2 + 26 files changed, 312 insertions(+), 33 deletions(-) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/Dockerfile (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/main.py (99%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext => foundry_vnext_responses}/helloworld/requirements.txt (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile create 
mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index 1b962b63e0..eca938afbd 100644 --- 
a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -3,7 +3,7 @@ import asyncio from collections.abc import AsyncIterable -from agent_framework import Agent, HistoryProvider, Message +from agent_framework import BaseAgent, HistoryProvider, Message, SupportsAgentRun from azure.ai.agentserver.responses import ( ResponseContext, ResponseEventStream, @@ -14,7 +14,7 @@ from azure.ai.agentserver.responses.models import CreateResponse, get_input_text from typing_extensions import Any, Sequence -from ._shared import extract_chat_options, to_messages +from ._shared import to_messages class ResponsesHostContextProvider(HistoryProvider): @@ -55,7 +55,7 @@ class ResponsesHostServer(ResponsesAgentServerHost): def __init__( self, - agent: Agent, + agent: BaseAgent, *, prefix: str = "", options: ResponsesServerOptions | None = None, @@ -78,6 +78,9 @@ def __init__( """ super().__init__(prefix=prefix, options=options, provider=provider, **kwargs) + if not isinstance(agent, SupportsAgentRun): + raise TypeError("Agent must support the SupportsAgentRun interface") + self.agent = agent self.create_handler(self._handle_create) # pyright: ignore[reportUnknownMemberType] @@ -122,13 +125,8 @@ async def _handle_create( yield text_content.emit_added() # Invoke the MAF agent - chat_options = extract_chat_options(request) full_text = "" - async for update in self.agent.run( - input_items, - options=chat_options, - stream=True, - ): + async for update in self.agent.run(input_items, stream=True): full_text += update.text yield text_content.emit_delta(update.text) diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py index ec6d732915..f76306af11 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py +++ 
b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py @@ -1,9 +1,8 @@ # Copyright (c) Microsoft. All rights reserved. -from agent_framework import ChatOptions, Content, Message +from agent_framework import Content, Message from azure.ai.agentserver.responses.models import ( ComputerScreenshotContent, - CreateResponse, FunctionCallOutputItemParam, MessageContent, MessageContentInputFileContent, @@ -26,27 +25,6 @@ from typing_extensions import Sequence, cast -def extract_chat_options(request: CreateResponse) -> ChatOptions: - """Extracts chat options from a CreateResponse request. - - Args: - request (CreateResponse): The CreateResponse request object containing the chat options. - - Returns: - ChatOptions: The extracted chat options. - """ - options = ChatOptions() - - if request.temperature is not None: - options["temperature"] = request.temperature - if request.top_p is not None: - options["top_p"] = request.top_p - if request.max_output_tokens is not None: - options["max_tokens"] = request.max_output_tokens - - return options - - def to_messages(history: Sequence[OutputItem]) -> list[Message]: """Converts a sequence of OutputItem objects to a list of Message objects. 
diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py similarity index 99% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py rename to 
python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py index 6b63158095..7475b287f5 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. - import os from agent_framework import Agent diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext/helloworld/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml new file mode 100644 index 0000000000..ad074d6b89 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml @@ -0,0 +1,16 @@ +name: echo-agent-responses-streaming +description: > + A simple echo agent that streams responses word-by-word using the + azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: echo-agent-responses-streaming + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml new file mode 100644 index 0000000000..3738c690a1 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: echo-agent-responses-streaming +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py new file mode 100644 index 0000000000..164ef87fcf --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py @@ -0,0 +1,65 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from agent_framework.orchestrations import GroupChatBuilder, GroupChatState +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def round_robin_selector(state: GroupChatState) -> str: + """A round-robin selector function that picks the next speaker based on the current round index.""" + + participant_names = list(state.participants.keys()) + return participant_names[state.current_round % len(participant_names)] + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + writer_agent = Agent( + client=client, + instructions=( + "You are an excellent content writer. You create new content and edit contents based on the feedback." + ), + name="writer", + ) + + reviewer_agent = Agent( + client=client, + instructions=( + "You are an excellent content reviewer." + "Provide actionable feedback to the writer about the provided content." + "Provide the feedback in the most concise manner possible." 
+ ), + name="reviewer", + ) + + workflow_agent = ( + GroupChatBuilder( + participants=[writer_agent, reviewer_agent], + # Set a hard termination condition to stop after 4 messages: + # User message + writer message + reviewer message + writer message + termination_condition=lambda conversation: len(conversation) >= 4, + selection_func=round_robin_selector, + ) + .build() + .as_agent() + ) + + server = ResponsesHostServer(workflow_agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml new file mode 100644 index 0000000000..ad074d6b89 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml @@ -0,0 +1,16 @@ +name: echo-agent-responses-streaming +description: > + A simple echo agent that streams responses word-by-word using the + azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: echo-agent-responses-streaming + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml new file mode 100644 index 0000000000..3738c690a1 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: echo-agent-responses-streaming +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py new file mode 100644 index 0000000000..690f87d93c --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py @@ -0,0 +1,46 @@ +# 
Copyright (c) Microsoft. All rights reserved. + +import os +from random import randint + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field +from typing_extensions import Annotated + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + name="HelloAgent", + instructions="You are a friendly assistant. 
Keep your answers brief.", + tools=[get_weather], + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/ +WORKDIR /app/user_agent + +RUN if [ -f requirements.txt ]; then \ + pip install -r requirements.txt; \ + else \ + echo "No requirements.txt found"; \ + fi + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml new file mode 100644 index 0000000000..ad074d6b89 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml @@ -0,0 +1,16 @@ +name: echo-agent-responses-streaming +description: > + A simple echo agent that streams responses word-by-word using the + azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: echo-agent-responses-streaming + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml new file mode 100644 index 0000000000..3738c690a1 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: echo-agent-responses-streaming +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py new file mode 100644 index 0000000000..1b92ec4669 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft. 
All rights reserved. + +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + github_pat = os.getenv("GITHUB_PAT") + if not github_pat: + raise ValueError( + "GITHUB_PAT environment variable must be set. Create a token at https://github.com/settings/tokens" + ) + + github_mcp_tool = client.get_mcp_tool( + name="GitHub", + url="https://api.githubcopilot.com/mcp/", + headers={ + "Authorization": f"Bearer {github_pat}", + }, + approval_mode="never_require", + ) + + agent = Agent( + client=client, + name="HelloAgent", + instructions="You are a friendly assistant. 
Keep your answers brief.", + tools=[github_mcp_tool], + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file From b2fa3a63e71414403f61b1b4b0c6ddc975f9d5bb Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Wed, 8 Apr 2026 14:31:53 -0700 Subject: [PATCH 04/13] Rename samples --- .../{helloworld => 01-basic}/.dockerignore | 0 .../{helloworld => 01-basic}/Dockerfile | 0 .../01-basic/agent.manifest.yaml | 15 +++++++++++++++ .../{hosted_mcp_tools => 01-basic}/agent.yaml | 2 +- .../{helloworld => 01-basic}/main.py | 5 ++++- .../{helloworld => 01-basic}/requirements.txt | 0 .../.dockerignore | 0 .../Dockerfile | 0 .../02_local_tools/agent.manifest.yaml | 15 +++++++++++++++ .../{local_tools => 02_local_tools}/agent.yaml | 2 +- .../{local_tools => 02_local_tools}/main.py | 5 ++++- .../requirements.txt | 0 .../{local_tools => 03_remote_mcp}/.dockerignore | 0 .../{local_tools => 03_remote_mcp}/Dockerfile | 0 .../03_remote_mcp/agent.manifest.yaml | 15 +++++++++++++++ .../{workflows => 03_remote_mcp}/agent.yaml | 2 +- .../{workflows => 03_remote_mcp}/main.py | 5 ++++- .../requirements.txt | 0 .../{workflows => 04-workflows}/.dockerignore | 0 .../{workflows => 04-workflows}/Dockerfile | 0 .../04-workflows/agent.manifest.yaml | 15 +++++++++++++++ .../{helloworld => 04-workflows}/agent.yaml | 2 +- .../{hosted_mcp_tools => 04-workflows}/main.py | 8 ++++++++ .../{workflows => 04-workflows}/requirements.txt | 0 .../helloworld/agent.manifest.yaml | 16 
---------------- .../hosted_mcp_tools/agent.manifest.yaml | 16 ---------------- .../local_tools/agent.manifest.yaml | 16 ---------------- .../workflows/agent.manifest.yaml | 16 ---------------- 28 files changed, 84 insertions(+), 71 deletions(-) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{helloworld => 01-basic}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{helloworld => 01-basic}/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{hosted_mcp_tools => 01-basic}/agent.yaml (74%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{helloworld => 01-basic}/main.py (75%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{helloworld => 01-basic}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{hosted_mcp_tools => 02_local_tools}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{hosted_mcp_tools => 02_local_tools}/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{local_tools => 02_local_tools}/agent.yaml (70%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{local_tools => 02_local_tools}/main.py (82%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{hosted_mcp_tools => 02_local_tools}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{local_tools => 03_remote_mcp}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{local_tools => 03_remote_mcp}/Dockerfile (100%) create mode 100644 
python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{workflows => 03_remote_mcp}/agent.yaml (68%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{workflows => 03_remote_mcp}/main.py (82%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{local_tools => 03_remote_mcp}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{workflows => 04-workflows}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{workflows => 04-workflows}/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{helloworld => 04-workflows}/agent.yaml (74%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{hosted_mcp_tools => 04-workflows}/main.py (79%) rename python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/{workflows => 04-workflows}/requirements.txt (100%) delete mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml delete mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml delete mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml delete mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/.dockerignore 
rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml new file mode 100644 index 0000000000..27bb7630e8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-basic +description: > + A basic Agent Framework agent hosted by Foundry. 
+metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-basic + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.yaml similarity index 74% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.yaml index 3738c690a1..b42f31863a 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.yaml +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.yaml @@ -1,5 +1,5 @@ kind: hosted -name: echo-agent-responses-streaming +name: agent-framework-agent-basic protocols: - protocol: responses version: v0.1.0 diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/main.py similarity index 75% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/main.py index 7475b287f5..047719efd9 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/main.py @@ -21,8 +21,11 @@ def main(): agent = Agent( client=client, - name="HelloAgent", instructions="You are a friendly assistant. Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, ) server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml new file mode 100644 index 0000000000..84a6fd95f5 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-local-tools +description: > + 
An Agent Framework agent with local tools hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-local-tools + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.yaml similarity index 70% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.yaml index 3738c690a1..a2642beb46 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.yaml +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.yaml @@ -1,5 +1,5 @@ kind: hosted -name: echo-agent-responses-streaming +name: agent-framework-agent-with-local-tools protocols: - protocol: responses version: v0.1.0 diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/main.py similarity index 82% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/main.py index 690f87d93c..6d213bf8c3 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/main.py @@ -33,9 +33,12 @@ def main(): agent = Agent( client=client, - name="HelloAgent", instructions="You are a friendly assistant. 
Keep your answers brief.", tools=[get_weather], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, ) server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml new file mode 100644 index 0000000000..daf7b10cd3 --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-remote-mcp-tools +description: > + An Agent Framework agent with remote MCP tools hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-remote-mcp-tools + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.yaml similarity index 68% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.yaml index 3738c690a1..9eaedced27 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.yaml +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.yaml @@ -1,5 +1,5 @@ kind: hosted -name: echo-agent-responses-streaming +name: agent-framework-agent-with-remote-mcp-tools protocols: - protocol: responses version: v0.1.0 diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/main.py similarity index 82% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/main.py index 1b92ec4669..455b13f1f2 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/main.py @@ -36,9 +36,12 @@ def main(): agent = 
Agent( client=client, - name="HelloAgent", instructions="You are a friendly assistant. Keep your answers brief.", tools=[github_mcp_tool], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, ) server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml new file mode 100644 
index 0000000000..027ca3713b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-workflows +description: > + An Agent Framework workflow hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-workflows + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.yaml similarity index 74% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.yaml index 3738c690a1..ee4fedc2ea 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.yaml +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.yaml @@ -1,5 +1,5 @@ kind: hosted -name: echo-agent-responses-streaming +name: agent-framework-workflows protocols: - protocol: responses version: v0.1.0 diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/main.py similarity index 79% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/main.py index 164ef87fcf..9d6ed68158 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/main.py @@ -33,6 +33,10 @@ def main(): "You are an 
excellent content writer. You create new content and edit contents based on the feedback." ), name="writer", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, ) reviewer_agent = Agent( @@ -43,6 +47,10 @@ def main(): "Provide the feedback in the most concise manner possible." ), name="reviewer", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, ) workflow_agent = ( diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml deleted file mode 100644 index ad074d6b89..0000000000 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/helloworld/agent.manifest.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: echo-agent-responses-streaming -description: > - A simple echo agent that streams responses word-by-word using the - azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). 
-metadata: - tags: - - AI Agent Hosting - - Azure AI AgentServer - - Responses Protocol - - Streaming -template: - name: echo-agent-responses-streaming - kind: hosted - protocols: - - protocol: responses - version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml deleted file mode 100644 index ad074d6b89..0000000000 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/hosted_mcp_tools/agent.manifest.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: echo-agent-responses-streaming -description: > - A simple echo agent that streams responses word-by-word using the - azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). -metadata: - tags: - - AI Agent Hosting - - Azure AI AgentServer - - Responses Protocol - - Streaming -template: - name: echo-agent-responses-streaming - kind: hosted - protocols: - - protocol: responses - version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml deleted file mode 100644 index ad074d6b89..0000000000 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/local_tools/agent.manifest.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: echo-agent-responses-streaming -description: > - A simple echo agent that streams responses word-by-word using the - azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). 
-metadata: - tags: - - AI Agent Hosting - - Azure AI AgentServer - - Responses Protocol - - Streaming -template: - name: echo-agent-responses-streaming - kind: hosted - protocols: - - protocol: responses - version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml deleted file mode 100644 index ad074d6b89..0000000000 --- a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/workflows/agent.manifest.yaml +++ /dev/null @@ -1,16 +0,0 @@ -name: echo-agent-responses-streaming -description: > - A simple echo agent that streams responses word-by-word using the - azure-ai-agentserver-responses SDK with Server-Sent Events (SSE). -metadata: - tags: - - AI Agent Hosting - - Azure AI AgentServer - - Responses Protocol - - Streaming -template: - name: echo-agent-responses-streaming - kind: hosted - protocols: - - protocol: responses - version: v0.1.0 \ No newline at end of file From 8403d57cafe363969e7339356f6e25520f3c5e01 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Wed, 8 Apr 2026 16:48:24 -0700 Subject: [PATCH 05/13] Add invocations --- .../core/agent_framework/foundry/__init__.py | 1 + .../core/agent_framework/foundry/__init__.pyi | 3 +- .../__init__.py | 3 +- .../_invocations.py | 73 +++++++++++++++++++ .../_responses.py | 14 ++-- .../01-basic/README.md | 13 ++++ .../01-basic/main.py | 35 +++++++++ .../01-basic/requirements.txt | 0 .../01-basic/.dockerignore | 0 .../01-basic/Dockerfile | 0 .../01-basic/README.md | 19 +++++ .../01-basic/agent.manifest.yaml | 0 .../01-basic/agent.yaml | 0 .../01-basic/main.py | 0 .../01-basic}/requirements.txt | 0 .../02_local_tools/.dockerignore | 0 .../02_local_tools/Dockerfile | 0 .../02_local_tools/README.md | 13 ++++ .../02_local_tools/agent.manifest.yaml | 0 .../02_local_tools/agent.yaml | 0 .../02_local_tools/main.py | 0 
.../02_local_tools}/requirements.txt | 0 .../03_remote_mcp/.dockerignore | 0 .../03_remote_mcp/Dockerfile | 0 .../03_remote_mcp/README.md | 13 ++++ .../03_remote_mcp/agent.manifest.yaml | 0 .../03_remote_mcp/agent.yaml | 0 .../03_remote_mcp/main.py | 0 .../03_remote_mcp}/requirements.txt | 0 .../04-workflows/.dockerignore | 0 .../04-workflows/Dockerfile | 0 .../04-workflows/README.md | 13 ++++ .../04-workflows/agent.manifest.yaml | 0 .../04-workflows/agent.yaml | 0 .../04-workflows/main.py | 0 .../04-workflows/requirements.txt | 2 + 36 files changed, 194 insertions(+), 8 deletions(-) create mode 100644 python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_invocations}/01-basic/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/01-basic/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/01-basic/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/01-basic/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/01-basic/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/01-basic/main.py (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses/02_local_tools => foundry_hosted_responses/01-basic}/requirements.txt (100%) rename 
python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/02_local_tools/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/02_local_tools/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/02_local_tools/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/02_local_tools/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/02_local_tools/main.py (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses/03_remote_mcp => foundry_hosted_responses/02_local_tools}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/03_remote_mcp/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/03_remote_mcp/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/03_remote_mcp/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/03_remote_mcp/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/03_remote_mcp/main.py (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses/04-workflows => foundry_hosted_responses/03_remote_mcp}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => 
foundry_hosted_responses}/04-workflows/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/04-workflows/Dockerfile (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/04-workflows/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/04-workflows/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/{foundry_vnext_responses => foundry_hosted_responses}/04-workflows/main.py (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt diff --git a/python/packages/core/agent_framework/foundry/__init__.py b/python/packages/core/agent_framework/foundry/__init__.py index f438b2ab10..eb9231f1da 100644 --- a/python/packages/core/agent_framework/foundry/__init__.py +++ b/python/packages/core/agent_framework/foundry/__init__.py @@ -33,6 +33,7 @@ "evaluate_foundry_target": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_traces": ("agent_framework_foundry", "agent-framework-foundry"), "ResponsesHostServer": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), + "InvocationsHostServer": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), } diff --git a/python/packages/core/agent_framework/foundry/__init__.pyi b/python/packages/core/agent_framework/foundry/__init__.pyi index 2d2a5e2920..3e7512b3aa 100644 --- a/python/packages/core/agent_framework/foundry/__init__.pyi +++ b/python/packages/core/agent_framework/foundry/__init__.pyi @@ -20,7 +20,7 @@ from agent_framework_foundry import ( evaluate_foundry_target, evaluate_traces, ) -from agent_framework_foundry_hosting import ResponsesHostServer +from agent_framework_foundry_hosting 
import InvocationsHostServer, ResponsesHostServer from agent_framework_foundry_local import ( FoundryLocalChatOptions, FoundryLocalClient, @@ -40,6 +40,7 @@ __all__ = [ "FoundryLocalClient", "FoundryLocalSettings", "FoundryMemoryProvider", + "InvocationsHostServer", "RawAnthropicFoundryClient", "RawFoundryAgent", "RawFoundryAgentChatClient", diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py index bad5e56fe0..81e8430783 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/__init__.py @@ -2,6 +2,7 @@ import importlib.metadata +from ._invocations import InvocationsHostServer from ._responses import ResponsesHostServer try: @@ -9,4 +10,4 @@ except importlib.metadata.PackageNotFoundError: __version__ = "0.0.0" -__all__ = ["ResponsesHostServer"] +__all__ = ["InvocationsHostServer", "ResponsesHostServer"] diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py new file mode 100644 index 0000000000..ae3330fff4 --- /dev/null +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from agent_framework import AgentSession, BaseAgent, SupportsAgentRun +from azure.ai.agentserver.invocations import InvocationAgentServerHost +from starlette.requests import Request +from starlette.responses import JSONResponse, Response, StreamingResponse +from typing_extensions import Any, AsyncGenerator, Optional + + +class InvocationsHostServer(InvocationAgentServerHost): + """An invocations server host for an agent.""" + + def __init__( + self, + agent: BaseAgent, + *, + stream: bool = False, + openapi_spec: Optional[dict[str, Any]] = None, + **kwargs: Any, + ) -> None: + """Initialize an InvocationsHostServer. + + Args: + agent: The agent to handle responses for. + stream: Whether to stream the responses. Defaults to False. + openapi_spec: The OpenAPI specification for the server. + **kwargs: Additional keyword arguments. + + This host will expect the request to be a JSON body with a "message" field. + The response from the host will be a JSON object with a "response" field containing + the agent's response and a "session_id" field containing the session ID. 
+ """ + super().__init__(openapi_spec=openapi_spec, **kwargs) + + if not isinstance(agent, SupportsAgentRun): + raise TypeError("Agent must support the SupportsAgentRun interface") + + self._agent = agent + self._stream = stream + self._sessions: dict[str, AgentSession] = {} + self.invoke_handler(self._handle_invoke) # pyright: ignore[reportUnknownMemberType] + + async def _handle_invoke(self, request: Request) -> Response: + """Invoke the agent with the given request.""" + data = await request.json() + session_id: str = request.state.session_id + + user_message = data.get("message", None) + if user_message is None: + error = "Missing 'message' in request" + if self._stream: + return StreamingResponse(content=error, status_code=400) + return Response(content=error, status_code=400) + + session = self._sessions.setdefault(session_id, AgentSession(session_id=session_id)) + + if self._stream: + + async def stream_response() -> AsyncGenerator[str]: + async for update in self._agent.run(user_message, session=session, stream=True): + yield update.text + + return StreamingResponse( + stream_response(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) + + response = await self._agent.run([user_message], session=session, stream=self._stream) + return JSONResponse({ + "response": response.text, + "session_id": session_id, + }) diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index eca938afbd..b865b21d9a 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -81,7 +81,7 @@ def __init__( if not isinstance(agent, SupportsAgentRun): raise TypeError("Agent must support the SupportsAgentRun interface") - self.agent = agent + self._agent = agent 
self.create_handler(self._handle_create) # pyright: ignore[reportUnknownMemberType] async def _handle_create( @@ -92,17 +92,17 @@ async def _handle_create( ) -> AsyncIterable[dict[str, Any]]: # Replace or add a history provider that has `load_messages=True` history_provider_idx: list[int] = [] - for i, provider in enumerate(self.agent.context_providers): + for i, provider in enumerate(self._agent.context_providers): if isinstance(provider, HistoryProvider) and provider.load_messages: history_provider_idx.append(i) if not history_provider_idx: - self.agent.context_providers.append(ResponsesHostContextProvider(context)) + self._agent.context_providers.append(ResponsesHostContextProvider(context)) elif len(history_provider_idx) > 1: # There shouldn't be more than one history provider with `load_messages=True` raise RuntimeError("There shouldn't be more than one history provider with `load_messages=True`") else: - self.agent.context_providers[history_provider_idx[0]] = ResponsesHostContextProvider(context) + self._agent.context_providers[history_provider_idx[0]] = ResponsesHostContextProvider(context) input_items = get_input_text(request) @@ -111,9 +111,11 @@ async def _handle_create( yield stream.emit_created() yield stream.emit_in_progress() + # Add reasoning + if request.stream is None or request.stream is False: # Run the agent in non-streaming mode - response = await self.agent.run(input_items, stream=False) + response = await self._agent.run(input_items, stream=False) for item in stream.output_item_message(response.text): yield item yield stream.emit_completed() @@ -126,7 +128,7 @@ async def _handle_create( # Invoke the MAF agent full_text = "" - async for update in self.agent.run(input_items, stream=True): + async for update in self._agent.run(input_items, stream=True): full_text += update.text yield text_content.emit_delta(update.text) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md new file mode 100644 index 0000000000..040f562f64 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `invocations` API + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/invocations -H "Content-Type: application/json" -d '{"message": "Hi!"}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py new file mode 100644 index 0000000000..4145aee14e --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient, InvocationsHostServer +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = InvocationsHostServer(agent) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md new file mode 100644 index 0000000000..57686cddda --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md @@ -0,0 +1,19 @@ +# Basic example of hosting an agent with the `responses` API + +Run the following command to start the server: + +```bash +python 
main.py +``` + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +To have a multi-turn conversation with the agent, include the previous response id in the request body. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/01-basic/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/requirements.txt 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md new file mode 100644 index 0000000000..b262afa7ab --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and local tools + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "What is the weather in Seattle?"}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/02_local_tools/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/requirements.txt diff --git 
a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md new file mode 100644 index 0000000000..5091871aa8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and a remote MCP + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "List all the repositories I own on GitHub."}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/03_remote_mcp/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/requirements.txt diff --git 
a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md new file mode 100644 index 0000000000..75d87c3fcb --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md @@ -0,0 +1,13 @@ +# Basic example of hosting an agent with the `responses` API and a workflow + +Run the following command to start the server: + +```bash +python main.py +``` + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Create a slogan for a new electric SUV that is affordable and fun to drive."}' +``` diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_vnext_responses/04-workflows/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file From 
ea625a7f5aca36461e1b2c27097ad28789435545 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 09:04:34 -0700 Subject: [PATCH 06/13] Comments 1 --- .../core/agent_framework/foundry/__init__.py | 3 --- .../core/agent_framework/foundry/__init__.pyi | 3 --- python/packages/core/pyproject.toml | 1 - .../{_shared.py => _message_converter.py} | 0 .../agent_framework_foundry_hosting/_responses.py | 12 ++++++------ python/packages/foundry_hosting/pyproject.toml | 4 ++-- .../foundry_hosted_invocations/01-basic/main.py | 3 ++- .../foundry_hosted_responses/01-basic/main.py | 3 ++- .../foundry_hosted_responses/02_local_tools/main.py | 3 ++- .../foundry_hosted_responses/03_remote_mcp/main.py | 3 ++- .../foundry_hosted_responses/04-workflows/main.py | 3 ++- 11 files changed, 18 insertions(+), 20 deletions(-) rename python/packages/foundry_hosting/agent_framework_foundry_hosting/{_shared.py => _message_converter.py} (100%) diff --git a/python/packages/core/agent_framework/foundry/__init__.py b/python/packages/core/agent_framework/foundry/__init__.py index eb9231f1da..b1d2b88450 100644 --- a/python/packages/core/agent_framework/foundry/__init__.py +++ b/python/packages/core/agent_framework/foundry/__init__.py @@ -6,7 +6,6 @@ - ``agent-framework-anthropic`` - ``agent-framework-foundry`` - ``agent-framework-foundry-local`` -- ``agent-framework-foundry-hosting`` """ import importlib @@ -32,8 +31,6 @@ "RawFoundryEmbeddingClient": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_foundry_target": ("agent_framework_foundry", "agent-framework-foundry"), "evaluate_traces": ("agent_framework_foundry", "agent-framework-foundry"), - "ResponsesHostServer": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), - "InvocationsHostServer": ("agent_framework_foundry_hosting", "agent-framework-foundry-hosting"), } diff --git a/python/packages/core/agent_framework/foundry/__init__.pyi b/python/packages/core/agent_framework/foundry/__init__.pyi index 
3e7512b3aa..47eb92b3af 100644 --- a/python/packages/core/agent_framework/foundry/__init__.pyi +++ b/python/packages/core/agent_framework/foundry/__init__.pyi @@ -20,7 +20,6 @@ from agent_framework_foundry import ( evaluate_foundry_target, evaluate_traces, ) -from agent_framework_foundry_hosting import InvocationsHostServer, ResponsesHostServer from agent_framework_foundry_local import ( FoundryLocalChatOptions, FoundryLocalClient, @@ -40,13 +39,11 @@ __all__ = [ "FoundryLocalClient", "FoundryLocalSettings", "FoundryMemoryProvider", - "InvocationsHostServer", "RawAnthropicFoundryClient", "RawFoundryAgent", "RawFoundryAgentChatClient", "RawFoundryChatClient", "RawFoundryEmbeddingClient", - "ResponsesHostServer", "evaluate_foundry_target", "evaluate_traces", ] diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index c6e7ef2b42..e4aa1f5e40 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -48,7 +48,6 @@ all = [ "agent-framework-durabletask", "agent-framework-foundry", "agent-framework-foundry-local", - "agent-framework-foundry-hosting", "agent-framework-github-copilot; python_version >= '3.11'", "agent-framework-lab", "agent-framework-mem0", diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py similarity index 100% rename from python/packages/foundry_hosting/agent_framework_foundry_hosting/_shared.py rename to python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index b865b21d9a..221ae279ca 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ 
-14,14 +14,14 @@ from azure.ai.agentserver.responses.models import CreateResponse, get_input_text from typing_extensions import Any, Sequence -from ._shared import to_messages +from ._message_converter import to_messages -class ResponsesHostContextProvider(HistoryProvider): +class ResponsesHostHistoryProvider(HistoryProvider): """A history provider that retrieves messages from a ResponseContext.""" def __init__(self, context: ResponseContext): - """Initialize a ResponsesHostContextProvider. + """Initialize a ResponsesHostHistoryProvider. Args: context: The ResponseContext to retrieve messages from. @@ -73,7 +73,7 @@ def __init__( Note: If the agent has a history provider with `load_messages=True`, it will be - replaced with a `ResponsesHostContextProvider` that will retrieve history + replaced with a `ResponsesHostHistoryProvider` that will retrieve history from the hosting infrastructure. """ super().__init__(prefix=prefix, options=options, provider=provider, **kwargs) @@ -97,12 +97,12 @@ async def _handle_create( history_provider_idx.append(i) if not history_provider_idx: - self._agent.context_providers.append(ResponsesHostContextProvider(context)) + self._agent.context_providers.append(ResponsesHostHistoryProvider(context)) elif len(history_provider_idx) > 1: # There shouldn't be more than one history provider with `load_messages=True` raise RuntimeError("There shouldn't be more than one history provider with `load_messages=True`") else: - self._agent.context_providers[history_provider_idx[0]] = ResponsesHostContextProvider(context) + self._agent.context_providers[history_provider_idx[0]] = ResponsesHostHistoryProvider(context) input_items = get_input_text(request) diff --git a/python/packages/foundry_hosting/pyproject.toml b/python/packages/foundry_hosting/pyproject.toml index 1f0c42dbbf..3bb6c71495 100644 --- a/python/packages/foundry_hosting/pyproject.toml +++ b/python/packages/foundry_hosting/pyproject.toml @@ -1,6 +1,6 @@ [project] name = 
"agent-framework-foundry-hosting" -description = "Foundry Local integration for Microsoft Agent Framework." +description = "Foundry Hosting integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" @@ -12,7 +12,7 @@ urls.release_notes = "https://github.com/microsoft/agent-framework/releases?q=ta urls.issues = "https://github.com/microsoft/agent-framework/issues" classifiers = [ "License :: OSI Approved :: MIT License", - "Development Status :: 4 - Beta", + "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py index 4145aee14e..17f9cb6341 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_invocations/01-basic/main.py @@ -3,7 +3,8 @@ import os from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, InvocationsHostServer +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import InvocationsHostServer from azure.identity import AzureCliCredential from dotenv import load_dotenv diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py index 047719efd9..0be716558b 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py @@ -3,7 +3,8 @@ import os from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, 
ResponsesHostServer +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer from azure.ai.agentserver.responses import InMemoryResponseProvider from azure.identity import AzureCliCredential from dotenv import load_dotenv diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py index 6d213bf8c3..7c8b3f6dc8 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/02_local_tools/main.py @@ -4,7 +4,8 @@ from random import randint from agent_framework import Agent, tool -from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer from azure.ai.agentserver.responses import InMemoryResponseProvider from azure.identity import AzureCliCredential from dotenv import load_dotenv diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py index 455b13f1f2..9a244f686f 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/03_remote_mcp/main.py @@ -3,7 +3,8 @@ import os from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer from azure.ai.agentserver.responses import InMemoryResponseProvider from azure.identity import AzureCliCredential from dotenv import load_dotenv diff --git 
a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py index 9d6ed68158..8965dc6a21 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py @@ -3,8 +3,9 @@ import os from agent_framework import Agent -from agent_framework.foundry import FoundryChatClient, ResponsesHostServer +from agent_framework.foundry import FoundryChatClient from agent_framework.orchestrations import GroupChatBuilder, GroupChatState +from agent_framework_foundry_hosting import ResponsesHostServer from azure.ai.agentserver.responses import InMemoryResponseProvider from azure.identity import AzureCliCredential from dotenv import load_dotenv From f26208757e1a5fee35fa589864585c705c24fe83 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 11:46:26 -0700 Subject: [PATCH 07/13] Comments 2 --- .../core/agent_framework/_telemetry.py | 31 +- python/packages/foundry_hosting/README.md | 11 + .../_invocations.py | 4 + .../_message_converter.py | 159 ---------- .../_responses.py | 284 ++++++++++++++---- .../packages/foundry_hosting/pyproject.toml | 7 +- python/pyproject.toml | 3 - 7 files changed, 269 insertions(+), 230 deletions(-) delete mode 100644 python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py diff --git a/python/packages/core/agent_framework/_telemetry.py b/python/packages/core/agent_framework/_telemetry.py index a044fc9d02..f4d259806d 100644 --- a/python/packages/core/agent_framework/_telemetry.py +++ b/python/packages/core/agent_framework/_telemetry.py @@ -26,6 +26,28 @@ HTTP_USER_AGENT: Final[str] = "agent-framework-python" AGENT_FRAMEWORK_USER_AGENT = f"{HTTP_USER_AGENT}/{version_info}" # type: ignore[has-type] +_user_agent_prefixes: list[str] = [] + + +def append_to_user_agent(prefix: 
str) -> None: + """Prepend a prefix to the agent framework user agent string. + + This is useful for hosting layers that want to identify themselves in telemetry. + Duplicate prefixes are ignored. + + Args: + prefix: The prefix to prepend (e.g. "foundry-hosting-responses"). + """ + if prefix and prefix not in _user_agent_prefixes: + _user_agent_prefixes.append(prefix) + + +def _get_user_agent() -> str: + """Return the full user agent string including any prepended prefixes.""" + if not _user_agent_prefixes: + return AGENT_FRAMEWORK_USER_AGENT + return f"{'/'.join(_user_agent_prefixes)}/{AGENT_FRAMEWORK_USER_AGENT}" + def prepend_agent_framework_to_user_agent(headers: dict[str, Any] | None = None) -> dict[str, Any]: """Prepend "agent-framework" to the User-Agent in the headers. @@ -57,12 +79,9 @@ def prepend_agent_framework_to_user_agent(headers: dict[str, Any] | None = None) """ if not IS_TELEMETRY_ENABLED: return headers or {} + user_agent = _get_user_agent() if not headers: - return {USER_AGENT_KEY: AGENT_FRAMEWORK_USER_AGENT} - headers[USER_AGENT_KEY] = ( - f"{AGENT_FRAMEWORK_USER_AGENT} {headers[USER_AGENT_KEY]}" - if USER_AGENT_KEY in headers - else AGENT_FRAMEWORK_USER_AGENT - ) + return {USER_AGENT_KEY: user_agent} + headers[USER_AGENT_KEY] = f"{user_agent} {headers[USER_AGENT_KEY]}" if USER_AGENT_KEY in headers else user_agent return headers diff --git a/python/packages/foundry_hosting/README.md b/python/packages/foundry_hosting/README.md index e69de29bb2..9222c01aa8 100644 --- a/python/packages/foundry_hosting/README.md +++ b/python/packages/foundry_hosting/README.md @@ -0,0 +1,11 @@ +# Foundry Hosting + +This package provides the integration of Agent Framework agents and workflows with the Foundry Agent Server, which can be hosted on Foundry infrastructure. 
+ +## Responses + +TODO + +## Invocations + +TODO diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py index ae3330fff4..2feb1001ba 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. from agent_framework import AgentSession, BaseAgent, SupportsAgentRun +from agent_framework._telemetry import append_to_user_agent from azure.ai.agentserver.invocations import InvocationAgentServerHost from starlette.requests import Request from starlette.responses import JSONResponse, Response, StreamingResponse @@ -10,6 +11,8 @@ class InvocationsHostServer(InvocationAgentServerHost): """An invocations server host for an agent.""" + USER_AGENT_PREFIX = "foundry-hosting-invocations" + def __init__( self, agent: BaseAgent, @@ -35,6 +38,7 @@ def __init__( if not isinstance(agent, SupportsAgentRun): raise TypeError("Agent must support the SupportsAgentRun interface") + append_to_user_agent(self.USER_AGENT_PREFIX) self._agent = agent self._stream = stream self._sessions: dict[str, AgentSession] = {} diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py deleted file mode 100644 index f76306af11..0000000000 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_message_converter.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. 
- -from agent_framework import Content, Message -from azure.ai.agentserver.responses.models import ( - ComputerScreenshotContent, - FunctionCallOutputItemParam, - MessageContent, - MessageContentInputFileContent, - MessageContentInputImageContent, - MessageContentInputTextContent, - MessageContentOutputTextContent, - MessageContentReasoningTextContent, - MessageContentRefusalContent, - OutputItem, - OutputItemFunctionToolCall, - OutputItemMessage, - OutputItemOutputMessage, - OutputItemReasoningItem, - OutputMessageContent, - OutputMessageContentOutputTextContent, - OutputMessageContentRefusalContent, - SummaryTextContent, - TextContent, -) -from typing_extensions import Sequence, cast - - -def to_messages(history: Sequence[OutputItem]) -> list[Message]: - """Converts a sequence of OutputItem objects to a list of Message objects. - - Args: - history (Sequence[OutputItem]): The sequence of OutputItem objects to convert. - - Returns: - list[Message]: The list of Message objects. - """ - messages: list[Message] = [] - for item in history: - messages.append(_to_message(item)) - return messages - - -def _to_message(item: OutputItem) -> Message: - """Converts an OutputItem to a Message. - - Args: - item (OutputItem): The OutputItem to convert. - - Returns: - Message: The converted Message. - - Raises: - ValueError: If the OutputItem type is not supported. 
- """ - if item.type == "output_message": - msg = cast(OutputItemOutputMessage, item) - contents = [_convert_output_message_content(part) for part in msg.content] - return Message(role=msg.role, contents=contents) - - if item.type == "message": - msg = cast(OutputItemMessage, item) - contents = [_convert_message_content(part) for part in msg.content] - return Message(role=msg.role, contents=contents) - - if item.type == "function_call": - fc = cast(OutputItemFunctionToolCall, item) - return Message( - role="assistant", - contents=[Content.from_function_call(fc.call_id, fc.name, arguments=fc.arguments)], - ) - - if item.type == "function_call_output": - fco = cast(FunctionCallOutputItemParam, item) - output = fco.output if isinstance(fco.output, str) else str(fco.output) - return Message( - role="tool", - contents=[Content.from_function_result(fco.call_id, result=output)], - ) - - if item.type == "reasoning": - reasoning = cast(OutputItemReasoningItem, item) - contents: list[Content] = [] - if reasoning.summary: - for summary in reasoning.summary: - contents.append(Content.from_text(summary.text)) - return Message(role="assistant", contents=contents) - - raise ValueError(f"Unsupported OutputItem type: {item.type}") - - -def _convert_output_message_content(content: OutputMessageContent) -> Content: - """Converts an OutputMessageContent to a Content object. - - Args: - content (OutputMessageContent): The OutputMessageContent to convert. - - Returns: - Content: The converted Content object. - - Raises: - ValueError: If the OutputMessageContent type is not supported. 
- """ - if content.type == "output_text": - text_content = cast(OutputMessageContentOutputTextContent, content) - return Content.from_text(text_content.text) - if content.type == "refusal": - refusal_content = cast(OutputMessageContentRefusalContent, content) - return Content.from_text(refusal_content.refusal) - - raise ValueError(f"Unsupported OutputMessageContent type: {content.type}") - - -def _convert_message_content(content: MessageContent) -> Content: - """Converts a MessageContent to a Content object. - - Args: - content (MessageContent): The MessageContent to convert. - - Returns: - Content: The converted Content object. - - Raises: - ValueError: If the MessageContent type is not supported. - """ - if content.type == "input_text": - input_text = cast(MessageContentInputTextContent, content) - return Content.from_text(input_text.text) - if content.type == "output_text": - output_text = cast(MessageContentOutputTextContent, content) - return Content.from_text(output_text.text) - if content.type == "text": - text = cast(TextContent, content) - return Content.from_text(text.text) - if content.type == "summary_text": - summary = cast(SummaryTextContent, content) - return Content.from_text(summary.text) - if content.type == "refusal": - refusal = cast(MessageContentRefusalContent, content) - return Content.from_text(refusal.refusal) - if content.type == "reasoning_text": - reasoning = cast(MessageContentReasoningTextContent, content) - return Content.from_text_reasoning(text=reasoning.text) - if content.type == "input_image": - image = cast(MessageContentInputImageContent, content) - if image.image_url: - return Content.from_uri(image.image_url) - if image.file_id: - return Content.from_hosted_file(image.file_id) - if content.type == "input_file": - file = cast(MessageContentInputFileContent, content) - if file.file_url: - return Content.from_uri(file.file_url) - if file.file_id: - return Content.from_hosted_file(file.file_id, name=file.filename) - if 
content.type == "computer_screenshot": - screenshot = cast(ComputerScreenshotContent, content) - return Content.from_uri(screenshot.image_url) - - raise ValueError(f"Unsupported MessageContent type: {content.type}") diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index 221ae279ca..99c542e4e4 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -3,7 +3,8 @@ import asyncio from collections.abc import AsyncIterable -from agent_framework import BaseAgent, HistoryProvider, Message, SupportsAgentRun +from agent_framework import Agent, ChatOptions, Content, HistoryProvider, Message +from agent_framework._telemetry import append_to_user_agent from azure.ai.agentserver.responses import ( ResponseContext, ResponseEventStream, @@ -11,51 +12,40 @@ ResponsesServerOptions, ) from azure.ai.agentserver.responses.hosting import ResponsesAgentServerHost -from azure.ai.agentserver.responses.models import CreateResponse, get_input_text -from typing_extensions import Any, Sequence - -from ._message_converter import to_messages - - -class ResponsesHostHistoryProvider(HistoryProvider): - """A history provider that retrieves messages from a ResponseContext.""" - - def __init__(self, context: ResponseContext): - """Initialize a ResponsesHostHistoryProvider. - - Args: - context: The ResponseContext to retrieve messages from. 
- """ - super().__init__("responses-host", load_messages=True) - self.context = context - - async def get_messages( - self, - session_id: str | None, - *, - state: dict[str, Any] | None = None, - **kwargs: Any, - ) -> list[Message]: - history = await self.context.get_history() - return to_messages(history) - - async def save_messages( - self, - session_id: str | None, - messages: Sequence[Message], - *, - state: dict[str, Any] | None = None, - **kwargs: Any, - ) -> None: - pass +from azure.ai.agentserver.responses.models import ( + ComputerScreenshotContent, + CreateResponse, + FunctionCallOutputItemParam, + MessageContent, + MessageContentInputFileContent, + MessageContentInputImageContent, + MessageContentInputTextContent, + MessageContentOutputTextContent, + MessageContentReasoningTextContent, + MessageContentRefusalContent, + OutputItem, + OutputItemFunctionToolCall, + OutputItemMessage, + OutputItemOutputMessage, + OutputItemReasoningItem, + OutputMessageContent, + OutputMessageContentOutputTextContent, + OutputMessageContentRefusalContent, + SummaryTextContent, + TextContent, + get_input_text, +) +from typing_extensions import Any, Sequence, cast class ResponsesHostServer(ResponsesAgentServerHost): """A responses server host for an agent.""" + USER_AGENT_PREFIX = "foundry-hosting-responses" + def __init__( self, - agent: BaseAgent, + agent: Agent, *, prefix: str = "", options: ResponsesServerOptions | None = None, @@ -72,39 +62,42 @@ def __init__( **kwargs: Additional keyword arguments. Note: - If the agent has a history provider with `load_messages=True`, it will be - replaced with a `ResponsesHostHistoryProvider` that will retrieve history - from the hosting infrastructure. + The agent must not have a history provider with `load_messages=True`, + because history is managed by the hosting infrastructure. 
""" super().__init__(prefix=prefix, options=options, provider=provider, **kwargs) - if not isinstance(agent, SupportsAgentRun): - raise TypeError("Agent must support the SupportsAgentRun interface") - + self._validate_agent(agent) self._agent = agent self.create_handler(self._handle_create) # pyright: ignore[reportUnknownMemberType] + # Append the user agent prefix for telemetry purposes + append_to_user_agent(self.USER_AGENT_PREFIX) + + def _validate_agent(self, agent: Agent) -> None: + """Validate the agent to ensure it does not have a history provider with `load_messages=True`. + + History is managed by the hosting infrastructure. + """ + for provider in agent.context_providers: + if isinstance(provider, HistoryProvider) and provider.load_messages: + raise RuntimeError( + "There shouldn't be a history provider with `load_messages=True` already present. " + "History is managed by the hosting infrastructure." + ) + async def _handle_create( self, request: CreateResponse, context: ResponseContext, cancellation_signal: asyncio.Event, ) -> AsyncIterable[dict[str, Any]]: - # Replace or add a history provider that has `load_messages=True` - history_provider_idx: list[int] = [] - for i, provider in enumerate(self._agent.context_providers): - if isinstance(provider, HistoryProvider) and provider.load_messages: - history_provider_idx.append(i) - - if not history_provider_idx: - self._agent.context_providers.append(ResponsesHostHistoryProvider(context)) - elif len(history_provider_idx) > 1: - # There shouldn't be more than one history provider with `load_messages=True` - raise RuntimeError("There shouldn't be more than one history provider with `load_messages=True`") - else: - self._agent.context_providers[history_provider_idx[0]] = ResponsesHostHistoryProvider(context) - + """Handle the creation of a response.""" input_items = get_input_text(request) + history = await context.get_history() + messages = [*_to_messages(history), input_items] + + chat_options = 
_to_chat_options(request) stream = ResponseEventStream(response_id=context.response_id, model=request.model) @@ -115,7 +108,7 @@ async def _handle_create( if request.stream is None or request.stream is False: # Run the agent in non-streaming mode - response = await self._agent.run(input_items, stream=False) + response = await self._agent.run(messages, stream=False, options=chat_options) for item in stream.output_item_message(response.text): yield item yield stream.emit_completed() @@ -128,7 +121,7 @@ async def _handle_create( # Invoke the MAF agent full_text = "" - async for update in self._agent.run(input_items, stream=True): + async for update in self._agent.run(messages, stream=True, options=chat_options): full_text += update.text yield text_content.emit_delta(update.text) @@ -138,3 +131,172 @@ async def _handle_create( yield message_item.emit_done() yield stream.emit_completed() + + +# region Option Conversion + + +def _to_chat_options(request: CreateResponse) -> ChatOptions: + """Converts a CreateResponse request to ChatOptions. + + Args: + request (CreateResponse): The request to convert. + + Returns: + ChatOptions: The converted ChatOptions. + """ + chat_options = ChatOptions() + + if request.temperature is not None: + chat_options["temperature"] = request.temperature + if request.top_p is not None: + chat_options["top_p"] = request.top_p + if request.max_output_tokens is not None: + chat_options["max_tokens"] = request.max_output_tokens + if request.parallel_tool_calls is not None: + chat_options["allow_multiple_tool_calls"] = request.parallel_tool_calls + + return chat_options + + +# endregion + + +# region Message Conversion + + +def _to_messages(history: Sequence[OutputItem]) -> list[Message]: + """Converts a sequence of OutputItem objects to a list of Message objects. + + Args: + history (Sequence[OutputItem]): The sequence of OutputItem objects to convert. + + Returns: + list[Message]: The list of Message objects. 
+ """ + messages: list[Message] = [] + for item in history: + messages.append(_to_message(item)) + return messages + + +def _to_message(item: OutputItem) -> Message: + """Converts an OutputItem to a Message. + + Args: + item (OutputItem): The OutputItem to convert. + + Returns: + Message: The converted Message. + + Raises: + ValueError: If the OutputItem type is not supported. + """ + if item.type == "output_message": + msg = cast(OutputItemOutputMessage, item) + contents = [_convert_output_message_content(part) for part in msg.content] + return Message(role=msg.role, contents=contents) + + if item.type == "message": + msg = cast(OutputItemMessage, item) + contents = [_convert_message_content(part) for part in msg.content] + return Message(role=msg.role, contents=contents) + + if item.type == "function_call": + fc = cast(OutputItemFunctionToolCall, item) + return Message( + role="assistant", + contents=[Content.from_function_call(fc.call_id, fc.name, arguments=fc.arguments)], + ) + + if item.type == "function_call_output": + fco = cast(FunctionCallOutputItemParam, item) + output = fco.output if isinstance(fco.output, str) else str(fco.output) + return Message( + role="tool", + contents=[Content.from_function_result(fco.call_id, result=output)], + ) + + if item.type == "reasoning": + reasoning = cast(OutputItemReasoningItem, item) + contents: list[Content] = [] + if reasoning.summary: + for summary in reasoning.summary: + contents.append(Content.from_text(summary.text)) + return Message(role="assistant", contents=contents) + + raise ValueError(f"Unsupported OutputItem type: {item.type}") + + +def _convert_output_message_content(content: OutputMessageContent) -> Content: + """Converts an OutputMessageContent to a Content object. + + Args: + content (OutputMessageContent): The OutputMessageContent to convert. + + Returns: + Content: The converted Content object. + + Raises: + ValueError: If the OutputMessageContent type is not supported. 
+ """ + if content.type == "output_text": + text_content = cast(OutputMessageContentOutputTextContent, content) + return Content.from_text(text_content.text) + if content.type == "refusal": + refusal_content = cast(OutputMessageContentRefusalContent, content) + return Content.from_text(refusal_content.refusal) + + raise ValueError(f"Unsupported OutputMessageContent type: {content.type}") + + +def _convert_message_content(content: MessageContent) -> Content: + """Converts a MessageContent to a Content object. + + Args: + content (MessageContent): The MessageContent to convert. + + Returns: + Content: The converted Content object. + + Raises: + ValueError: If the MessageContent type is not supported. + """ + if content.type == "input_text": + input_text = cast(MessageContentInputTextContent, content) + return Content.from_text(input_text.text) + if content.type == "output_text": + output_text = cast(MessageContentOutputTextContent, content) + return Content.from_text(output_text.text) + if content.type == "text": + text = cast(TextContent, content) + return Content.from_text(text.text) + if content.type == "summary_text": + summary = cast(SummaryTextContent, content) + return Content.from_text(summary.text) + if content.type == "refusal": + refusal = cast(MessageContentRefusalContent, content) + return Content.from_text(refusal.refusal) + if content.type == "reasoning_text": + reasoning = cast(MessageContentReasoningTextContent, content) + return Content.from_text_reasoning(text=reasoning.text) + if content.type == "input_image": + image = cast(MessageContentInputImageContent, content) + if image.image_url: + return Content.from_uri(image.image_url) + if image.file_id: + return Content.from_hosted_file(image.file_id) + if content.type == "input_file": + file = cast(MessageContentInputFileContent, content) + if file.file_url: + return Content.from_uri(file.file_url) + if file.file_id: + return Content.from_hosted_file(file.file_id, name=file.filename) + if 
content.type == "computer_screenshot": + screenshot = cast(ComputerScreenshotContent, content) + return Content.from_uri(screenshot.image_url) + + raise ValueError(f"Unsupported MessageContent type: {content.type}") + + +# endregion diff --git a/python/packages/foundry_hosting/pyproject.toml b/python/packages/foundry_hosting/pyproject.toml index 3bb6c71495..55916ac076 100644 --- a/python/packages/foundry_hosting/pyproject.toml +++ b/python/packages/foundry_hosting/pyproject.toml @@ -4,7 +4,7 @@ description = "Foundry Hosting integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b260402" +version = "1.0.0a260402" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -29,6 +29,11 @@ dependencies = [ "azure-ai-agentserver-invocations" ] +[tool.uv.sources] +azure-ai-agentserver-responses = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-responses" } +azure-ai-agentserver-invocations = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-invocations" } +azure-ai-agentserver-core = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-core" } + [tool.uv] prerelease = "if-necessary-or-explicit" environments = [ diff --git a/python/pyproject.toml b/python/pyproject.toml index 4accd6d888..9e677fb602 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -89,9 +89,6 @@ agent-framework-redis = { workspace = true } agent-framework-github-copilot = { workspace = true } agent-framework-claude = { workspace = true } agent-framework-orchestrations = { workspace 
= true } -azure-ai-agentserver-responses = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-responses" } -azure-ai-agentserver-invocations = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-invocations" } -azure-ai-agentserver-core = { git = "https://github.com/Azure/azure-sdk-for-python.git", branch = "agentserver/responses", subdirectory = "sdk/agentserver/azure-ai-agentserver-core" } litellm = { url = "https://files.pythonhosted.org/packages/57/77/0c6eca2cb049793ddf8ce9cdcd5123a35666c4962514788c4fc90edf1d3b/litellm-1.82.1-py3-none-any.whl" } [tool.ruff] From 228c60ca0b57fe61c38cce4d2762a608320447b6 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 12:25:39 -0700 Subject: [PATCH 08/13] Comments 3 --- .../agent_framework_foundry_hosting/_responses.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index 99c542e4e4..8c0447d7bd 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -112,6 +112,7 @@ async def _handle_create( for item in stream.output_item_message(response.text): yield item yield stream.emit_completed() + return # Start the streaming response message_item = stream.add_output_item_message() @@ -120,13 +121,14 @@ async def _handle_create( yield text_content.emit_added() # Invoke the MAF agent - full_text = "" - async for update in self._agent.run(messages, stream=True, options=chat_options): - full_text += update.text - yield text_content.emit_delta(update.text) + response_stream = self._agent.run(messages, stream=True, options=chat_options) 
+ async for update in response_stream: + if update.text: + yield text_content.emit_delta(update.text) # Complete the message - yield text_content.emit_done(full_text) + final = await response_stream.get_final_response() + yield text_content.emit_done(final.text) yield message_item.emit_content_done(text_content) yield message_item.emit_done() From 735c1236ee219629cadd3a216a6417c7370676ab Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 12:46:42 -0700 Subject: [PATCH 09/13] Improve README --- .../foundry_hosted_responses/01-basic/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md index 57686cddda..e6eaa7c6c7 100644 --- a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md @@ -1,19 +1,35 @@ # Basic example of hosting an agent with the `responses` API +## Running the server locally + Run the following command to start the server: ```bash python main.py ``` +## Interacting with the agent + Send a POST request to the server with a JSON body containing a "message" field to interact with the agent. For example: ```bash curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' ``` +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + To have a multi-turn conversation with the agent, include the previous response id in the request body. 
For example: ```bash curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' ``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO From 20ac97c69d183d51c5c28bcc9fa51ae2271055e2 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 14:15:34 -0700 Subject: [PATCH 10/13] Add local shell sample --- .../{01-basic => 01_basic}/.dockerignore | 0 .../{01-basic => 01_basic}/Dockerfile | 0 .../{01-basic => 01_basic}/README.md | 0 .../agent.manifest.yaml | 0 .../{01-basic => 01_basic}/agent.yaml | 0 .../{01-basic => 01_basic}/main.py | 0 .../{01-basic => 01_basic}/requirements.txt | 0 .../.dockerignore | 0 .../{04-workflows => 04_workflows}/Dockerfile | 0 .../{04-workflows => 04_workflows}/README.md | 0 .../agent.manifest.yaml | 0 .../{04-workflows => 04_workflows}/agent.yaml | 0 .../{04-workflows => 04_workflows}/main.py | 0 .../requirements.txt | 0 .../05_local_shell/.dockerignore | 6 ++ .../05_local_shell/Dockerfile | 16 +++++ .../05_local_shell/README.md | 43 +++++++++++++ .../05_local_shell/agent.manifest.yaml | 15 +++++ .../05_local_shell/agent.yaml | 8 +++ .../05_local_shell/main.py | 63 +++++++++++++++++++ .../05_local_shell/requirements.txt | 2 + 21 files changed, 153 insertions(+) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/Dockerfile (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/README.md (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/agent.yaml (100%) rename 
python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/main.py (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{01-basic => 01_basic}/requirements.txt (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/.dockerignore (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/Dockerfile (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/README.md (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/agent.manifest.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/agent.yaml (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/main.py (100%) rename python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/{04-workflows => 04_workflows}/requirements.txt (100%) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt diff --git 
a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/README.md similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/README.md rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/README.md diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/agent.yaml rename to 
python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01-basic/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/01_basic/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/.dockerignore similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/.dockerignore rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/.dockerignore diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/Dockerfile similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/Dockerfile rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/Dockerfile diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/README.md similarity 
index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/README.md rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/README.md diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.manifest.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.manifest.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.manifest.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.yaml similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/agent.yaml rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/agent.yaml diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/main.py similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/main.py rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/main.py diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/requirements.txt similarity index 100% rename from python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04-workflows/requirements.txt rename to python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/04_workflows/requirements.txt diff --git 
a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile new file mode 100644 index 0000000000..845d325e7c --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app/user_agent + +COPY wheels/ /tmp/wheels/ +COPY requirements.txt . +RUN pip install --no-cache-dir --find-links /tmp/wheels/ -r requirements.txt && rm -rf /tmp/wheels/ + +COPY . . + +RUN useradd -r appuser +USER appuser + +EXPOSE 8088 + +CMD ["python", "main.py"] \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md new file mode 100644 index 0000000000..73f1ad05a9 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/README.md @@ -0,0 +1,43 @@ +# Agent Framework Agent with Local Shell + +> Note: This agent can execute local shell commands. We recommend running it in an isolated environment for security reasons. + +## Running the server in a Docker container + +Build the Docker image: + +```bash +docker build -t agent-framework-agent-with-local-shell . 
+``` + +Run the Docker container: + +```bash +docker run -p 8088:8088 --env-file .env agent-framework-agent-with-local-shell +``` + +## Interacting with the agent + +Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}' +``` + +The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests. + +## Multi-turn conversation + +To have a multi-turn conversation with the agent, include the previous response id in the request body. For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml new file mode 100644 index 0000000000..32f133d12d --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-local-shell +description: > + An Agent Framework agent that can execute local shell commands hosted by Foundry.
+metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-local-shell + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml new file mode 100644 index 0000000000..6c0a04b83b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-local-shell +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py new file mode 100644 index 0000000000..094eb766f4 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/main.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import os +import subprocess + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="always_require") +def run_bash(command: str) -> str: + """Execute a shell command locally and return stdout, stderr, and exit code.""" + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=30, + ) + parts: list[str] = [] + if result.stdout: + parts.append(result.stdout) + if result.stderr: + parts.append(f"stderr: {result.stderr}") + parts.append(f"exit_code: {result.returncode}") + return "\n".join(parts) + except subprocess.TimeoutExpired: + return "Command timed out after 30 seconds" + except Exception as e: + return f"Error executing command: {e}" + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[run_bash], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt new file mode 100644 index 0000000000..61b5ba14f7 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/05_local_shell/requirements.txt @@ -0,0 +1,2 @@ +agent-framework-core +agent-framework-foundry-hosting \ No newline at end of file From 8ecf274abedc07ba7cf5f7182a317dd08a5f3569 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 9 Apr 2026 17:03:00 -0700 Subject: [PATCH 11/13] WIP: Add eval and memory samples --- .../06_eval/.dockerignore | 6 ++ .../06_eval/Dockerfile | 16 ++++ .../06_eval/README.md | 35 +++++++ .../06_eval/agent.manifest.yaml | 15 +++ .../06_eval/agent.yaml | 8 ++ .../foundry_hosted_responses/06_eval/main.py | 50 ++++++++++ .../06_eval/requirements.txt | 2 + .../07_foundry_memory/.dockerignore | 6 ++ .../07_foundry_memory/Dockerfile | 16 ++++ .../07_foundry_memory/README.md | 35 +++++++ .../07_foundry_memory/agent.manifest.yaml | 15 +++ .../07_foundry_memory/agent.yaml | 8 ++ .../07_foundry_memory/main.py | 91 +++++++++++++++++++ .../07_foundry_memory/requirements.txt | 2 + 14 files changed, 305 insertions(+) create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml 
create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py create mode 100644 python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/requirements.txt diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + 
+COPY . user_agent/
+WORKDIR /app/user_agent
+
+RUN if [ -f requirements.txt ]; then \
+    pip install -r requirements.txt; \
+    else \
+    echo "No requirements.txt found"; \
+    fi
+
+EXPOSE 8088
+
+CMD ["python", "main.py"]
\ No newline at end of file
diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md
new file mode 100644
index 0000000000..195d10e966
--- /dev/null
+++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/README.md
@@ -0,0 +1,35 @@
+# Agent Framework Agent with Evaluation
+
+## Running the server locally
+
+Run the following command to start the server:
+
+```bash
+python main.py
+```
+
+## Interacting with the agent
+
+Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example:
+
+```bash
+curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}'
+```
+
+The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests.
+
+## Multi-turn conversation
+
+To have a multi-turn conversation with the agent, include the previous response ID in the request body. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml new file mode 100644 index 0000000000..a6cc8af7fd --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-eval +description: > + An Agent Framework agent is evaluated on each response hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-eval + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml new file mode 100644 index 0000000000..7fa752ca8b --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-eval +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py new file mode 100644 index 0000000000..7c8b3f6dc8 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/main.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft. 
All rights reserved. + +import os +from random import randint + +from agent_framework import Agent, tool +from agent_framework.foundry import FoundryChatClient +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.identity import AzureCliCredential +from dotenv import load_dotenv +from pydantic import Field +from typing_extensions import Annotated + +# Load environment variables from .env file +load_dotenv() + + +@tool(approval_mode="never_require") +def get_weather( + location: Annotated[str, Field(description="The location to get the weather for.")], +) -> str: + """Get the weather for a given location.""" + conditions = ["sunny", "cloudy", "rainy", "stormy"] + return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C." + + +def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + tools=[get_weather], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. 
Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + server.run() + + +if __name__ == "__main__": + main() diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/06_eval/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore new file mode 100644 index 0000000000..008e6e6616 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/.dockerignore @@ -0,0 +1,6 @@ +.venv +__pycache__ +*.pyc +*.pyo +*.pyd +.Python \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile new file mode 100644 index 0000000000..eaffb94f19 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/Dockerfile @@ -0,0 +1,16 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . 
user_agent/
+WORKDIR /app/user_agent
+
+RUN if [ -f requirements.txt ]; then \
+    pip install -r requirements.txt; \
+    else \
+    echo "No requirements.txt found"; \
+    fi
+
+EXPOSE 8088
+
+CMD ["python", "main.py"]
\ No newline at end of file
diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md
new file mode 100644
index 0000000000..e9987de36d
--- /dev/null
+++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/README.md
@@ -0,0 +1,35 @@
+# Agent Framework Agent with Foundry Memory
+
+## Running the server locally
+
+Run the following command to start the server:
+
+```bash
+python main.py
+```
+
+## Interacting with the agent
+
+Send a POST request to the server with a JSON body containing an "input" field to interact with the agent. For example:
+
+```bash
+curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "Hi"}'
+```
+
+The server will respond with a JSON object containing the response text and a response ID. You can use this response ID to continue the conversation in subsequent requests.
+
+## Multi-turn conversation
+
+To have a multi-turn conversation with the agent, include the previous response ID in the request body. 
For example: + +```bash +curl -X POST http://localhost:8088/responses -H "Content-Type: application/json" -d '{"input": "How are you?", "previous_response_id": "REPLACE_WITH_PREVIOUS_RESPONSE_ID"}' +``` + +## Deploying to Foundry + +TODO + +## Using the deployed agent in Agent Framework + +TODO diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml new file mode 100644 index 0000000000..b0cf8c5fab --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.manifest.yaml @@ -0,0 +1,15 @@ +name: agent-framework-agent-with-foundry-memory +description: > + An Agent Framework agent with memory support hosted by Foundry. +metadata: + tags: + - AI Agent Hosting + - Azure AI AgentServer + - Responses Protocol + - Streaming +template: + name: agent-framework-agent-with-foundry-memory + kind: hosted + protocols: + - protocol: responses + version: v0.1.0 \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml new file mode 100644 index 0000000000..90c9a26406 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/agent.yaml @@ -0,0 +1,8 @@ +kind: hosted +name: agent-framework-agent-with-foundry-memory +protocols: + - protocol: responses + version: v0.1.0 +resources: + cpu: "0.25" + memory: 0.5Gi \ No newline at end of file diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py new file mode 100644 index 0000000000..1a46bdac3c --- /dev/null +++ 
b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/main.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging +import os +from datetime import datetime, timezone + +from agent_framework import Agent +from agent_framework.foundry import FoundryChatClient, FoundryMemoryProvider +from agent_framework_foundry_hosting import ResponsesHostServer +from azure.ai.agentserver.responses import InMemoryResponseProvider +from azure.ai.projects.aio import AIProjectClient +from azure.ai.projects.models import ( + MemoryStoreDefaultDefinition, + MemoryStoreDefaultOptions, +) +from azure.identity import AzureCliCredential +from dotenv import load_dotenv + +# Load environment variables from .env file +load_dotenv() + +logging.basicConfig(level=logging.INFO) + + +async def _create_memory_store(project_client: AIProjectClient) -> FoundryMemoryProvider: + memory_store_name = f"hosted_agent_memory_{datetime.now(timezone.utc).strftime('%Y%m%d')}" + options = MemoryStoreDefaultOptions( + chat_summary_enabled=True, + user_profile_enabled=True, + user_profile_details=( + "Avoid irrelevant or sensitive data, such as age, financials, precise location, and credentials" + ), + ) + memory_store_definition = MemoryStoreDefaultDefinition( + chat_model=os.environ["FOUNDRY_MODEL"], + embedding_model=os.environ["AZURE_OPENAI_EMBEDDING_MODEL"], + options=options, + ) + memory_store = await project_client.beta.memory_stores.create( + name=memory_store_name, + description="Memory store for Agent Framework with FoundryMemoryProvider", + definition=memory_store_definition, + ) + + return FoundryMemoryProvider( + project_client=project_client, + memory_store_name=memory_store.name, + # Scope memories to a specific user, if not set, the session_id + # will be used as scope, which means memories are only shared within the same session + scope="demo", + # Do not wait to update memories after each interaction (for demo 
purposes) + # In production, consider setting a delay to batch updates and reduce costs + update_delay=0, + ) + + +async def _delete_memory_store(project_client: AIProjectClient, memory_store_name: str): + await project_client.beta.memory_stores.delete(name=memory_store_name) + + +async def main(): + client = FoundryChatClient( + project_endpoint=os.environ["FOUNDRY_PROJECT_ENDPOINT"], + model=os.environ["FOUNDRY_MODEL"], + credential=AzureCliCredential(), + ) + + # Create the memory store + memory_provider = await _create_memory_store(client.project_client) + + agent = Agent( + client=client, + instructions="You are a friendly assistant. Keep your answers brief.", + context_providers=[memory_provider], + # History will be managed by the hosting infrastructure, thus there + # is no need to store history by the service. Learn more at: + # https://developers.openai.com/api/reference/resources/responses/methods/create + default_options={"store": False}, + ) + + server = ResponsesHostServer(agent, provider=InMemoryResponseProvider()) + + try: + await server.run_async() + finally: + await _delete_memory_store(client.project_client, memory_provider.memory_store_name) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/requirements.txt b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/requirements.txt new file mode 100644 index 0000000000..f7dc62f3e3 --- /dev/null +++ b/python/samples/05-end-to-end/hosted_agents/foundry_hosted_responses/07_foundry_memory/requirements.txt @@ -0,0 +1,2 @@ +agent-framework +agent-framework-foundry-hosting \ No newline at end of file From 4d7c035bb335b74aee28f6700f83cc04af8ecf6b Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Fri, 10 Apr 2026 09:52:50 -0700 Subject: [PATCH 12/13] Update user agent prefix --- .../agent_framework_foundry_hosting/_invocations.py | 2 +- 
.../agent_framework_foundry_hosting/_responses.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py index 2feb1001ba..08407874df 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_invocations.py @@ -11,7 +11,7 @@ class InvocationsHostServer(InvocationAgentServerHost): """An invocations server host for an agent.""" - USER_AGENT_PREFIX = "foundry-hosting-invocations" + USER_AGENT_PREFIX = "foundry-hosting" def __init__( self, diff --git a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py index 8c0447d7bd..94c710f5b8 100644 --- a/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py +++ b/python/packages/foundry_hosting/agent_framework_foundry_hosting/_responses.py @@ -41,7 +41,7 @@ class ResponsesHostServer(ResponsesAgentServerHost): """A responses server host for an agent.""" - USER_AGENT_PREFIX = "foundry-hosting-responses" + USER_AGENT_PREFIX = "foundry-hosting" def __init__( self, From 6f85956fd9cfbc3fdec2a0734b91108a083ce9cb Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Fri, 10 Apr 2026 10:00:16 -0700 Subject: [PATCH 13/13] Update user agent prefix doc --- python/packages/core/agent_framework/_telemetry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/packages/core/agent_framework/_telemetry.py b/python/packages/core/agent_framework/_telemetry.py index f4d259806d..8e59704b8a 100644 --- a/python/packages/core/agent_framework/_telemetry.py +++ b/python/packages/core/agent_framework/_telemetry.py @@ -36,7 +36,7 @@ def append_to_user_agent(prefix: str) -> None: Duplicate prefixes are ignored. 
Args: - prefix: The prefix to prepend (e.g. "foundry-hosting-responses"). + prefix: The prefix to prepend (e.g. "foundry-hosting"). """ if prefix and prefix not in _user_agent_prefixes: _user_agent_prefixes.append(prefix)