From 1050436d010606561eb4877a7552aa9771235772 Mon Sep 17 00:00:00 2001
From: RandomOscillations
Date: Mon, 16 Feb 2026 21:21:06 -0500
Subject: [PATCH 1/2] fix(simulation): reuse cached async providers in simple_call_async

---
 extropy/core/llm.py                | 12 ++--
 extropy/core/providers/__init__.py | 12 ++--
 tests/test_llm.py                  | 96 ++++++++++++++++++++++++++++++
 3 files changed, 108 insertions(+), 12 deletions(-)
 create mode 100644 tests/test_llm.py

diff --git a/extropy/core/llm.py b/extropy/core/llm.py
index dfbc3db..1872eda 100644
--- a/extropy/core/llm.py
+++ b/extropy/core/llm.py
@@ -11,7 +11,7 @@
 Configure via `extropy config` CLI or programmatically via extropy.config.configure().
 """
 
-from .providers import get_provider
+from .providers import get_provider, get_simulation_provider
 from .providers.base import TokenUsage, ValidatorCallback, RetryCallback
 from ..config import get_config, parse_model_string
 
@@ -80,12 +80,10 @@ async def simple_call_async(
     Model is passed explicitly from simulation caller (provider/model format).
     Returns (structured_data, token_usage) tuple.
     """
-    if model:
-        provider, model_name = _resolve_provider_and_model(model)
-    else:
-        config = get_config()
-        model_string = config.resolve_sim_strong()
-        provider, model_name = _resolve_provider_and_model(model_string)
+    config = get_config()
+    model_string = model or config.resolve_sim_strong()
+    _, model_name = parse_model_string(model_string)
+    provider = get_simulation_provider(model_string)
     return await provider.simple_call_async(
         prompt=prompt,
         response_schema=response_schema,
diff --git a/extropy/core/providers/__init__.py b/extropy/core/providers/__init__.py
index f763ded..cdead1d 100644
--- a/extropy/core/providers/__init__.py
+++ b/extropy/core/providers/__init__.py
@@ -178,14 +178,16 @@ def get_pipeline_provider() -> LLMProvider:
     return _get_or_create_provider(provider, f"pipeline:{provider}")
 
 
-def get_simulation_provider() -> LLMProvider:
-    """Get the cached provider for simulation phase (agent reasoning).
+def get_simulation_provider(model_string: str | None = None) -> LLMProvider:
+    """Get a cached provider for simulation phase async calls.
 
-    Uses the provider from the resolved simulation strong model.
+    Args:
+        model_string: Optional explicit model string ("provider/model"). If
+            omitted, uses resolved simulation strong model from config.
     """
     config = get_config()
-    strong_model = config.resolve_sim_strong()
-    provider, _ = parse_model_string(strong_model)
+    resolved_model = model_string or config.resolve_sim_strong()
+    provider, _ = parse_model_string(resolved_model)
     return _get_or_create_provider(provider, f"simulation:{provider}")
 
 
diff --git a/tests/test_llm.py b/tests/test_llm.py
new file mode 100644
index 0000000..7a62d5e
--- /dev/null
+++ b/tests/test_llm.py
@@ -0,0 +1,96 @@
+import asyncio
+from types import SimpleNamespace
+from unittest.mock import AsyncMock, MagicMock, Mock
+
+from extropy.core import llm
+from extropy.core.providers.base import TokenUsage
+
+
+def test_simple_call_async_uses_cached_simulation_provider_default_model(monkeypatch):
+    provider = MagicMock()
+    provider.simple_call_async = AsyncMock(
+        return_value=({"ok": True}, TokenUsage(input_tokens=11, output_tokens=7))
+    )
+    config = SimpleNamespace(resolve_sim_strong=lambda: "openai/gpt-5", providers={})
+    get_sim_provider = Mock(return_value=provider)
+
+    monkeypatch.setattr(llm, "get_config", lambda: config)
+    monkeypatch.setattr(llm, "get_simulation_provider", get_sim_provider)
+    monkeypatch.setattr(
+        llm,
+        "get_provider",
+        Mock(side_effect=AssertionError("simple_call_async should use simulation cache")),
+    )
+
+    result, usage = asyncio.run(
+        llm.simple_call_async(
+            prompt="hello",
+            response_schema={"type": "object"},
+            schema_name="response",
+        )
+    )
+
+    assert result == {"ok": True}
+    assert usage.input_tokens == 11
+    assert usage.output_tokens == 7
+    get_sim_provider.assert_called_once_with("openai/gpt-5")
+    provider.simple_call_async.assert_awaited_once()
+    assert provider.simple_call_async.await_args.kwargs["model"] == "gpt-5"
+
+
+def test_simple_call_async_uses_cached_simulation_provider_for_explicit_model(monkeypatch):
+    provider = MagicMock()
+    provider.simple_call_async = AsyncMock(return_value=({"ok": True}, TokenUsage()))
+    config = SimpleNamespace(resolve_sim_strong=lambda: "openai/gpt-5", providers={})
+    get_sim_provider = Mock(return_value=provider)
+
+    monkeypatch.setattr(llm, "get_config", lambda: config)
+    monkeypatch.setattr(llm, "get_simulation_provider", get_sim_provider)
+    monkeypatch.setattr(
+        llm,
+        "get_provider",
+        Mock(side_effect=AssertionError("explicit async model should still use simulation cache")),
+    )
+
+    asyncio.run(
+        llm.simple_call_async(
+            prompt="hello",
+            response_schema={"type": "object"},
+            schema_name="response",
+            model="anthropic/claude-sonnet-4-5",
+        )
+    )
+
+    get_sim_provider.assert_called_once_with("anthropic/claude-sonnet-4-5")
+    provider.simple_call_async.assert_awaited_once()
+    assert (
+        provider.simple_call_async.await_args.kwargs["model"]
+        == "claude-sonnet-4-5"
+    )
+
+
+def test_simple_call_sync_path_still_uses_regular_provider_factory(monkeypatch):
+    provider = MagicMock()
+    provider.simple_call.return_value = {"ok": True}
+    config = SimpleNamespace(resolve_pipeline_fast=lambda: "openai/gpt-5-mini", providers={})
+    get_provider = Mock(return_value=provider)
+
+    monkeypatch.setattr(llm, "get_config", lambda: config)
+    monkeypatch.setattr(llm, "get_provider", get_provider)
+    monkeypatch.setattr(
+        llm,
+        "get_simulation_provider",
+        Mock(side_effect=AssertionError("sync calls should not use simulation provider cache")),
+    )
+
+    result = llm.simple_call(
+        prompt="hello",
+        response_schema={"type": "object"},
+        schema_name="response",
+        model="openai/gpt-5-mini",
+    )
+
+    assert result == {"ok": True}
+    get_provider.assert_called_once_with("openai", config.providers)
+    provider.simple_call.assert_called_once()
+    assert provider.simple_call.call_args.kwargs["model"] == "gpt-5-mini"

From 28640aad1663a866c5369f815c9e273ce47a6f35 Mon Sep 17 00:00:00 2001
From: RandomOscillations
Date: Mon, 16 Feb 2026 21:26:43 -0500
Subject: [PATCH 2/2] style: format llm regression tests with ruff

---
 tests/test_llm.py | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/tests/test_llm.py b/tests/test_llm.py
index 7a62d5e..a45bd3a 100644
--- a/tests/test_llm.py
+++ b/tests/test_llm.py
@@ -19,7 +19,9 @@ def test_simple_call_async_uses_cached_simulation_provider_default_model(monkeyp
     monkeypatch.setattr(
         llm,
         "get_provider",
-        Mock(side_effect=AssertionError("simple_call_async should use simulation cache")),
+        Mock(
+            side_effect=AssertionError("simple_call_async should use simulation cache")
+        ),
     )
 
     result, usage = asyncio.run(
@@ -38,7 +40,9 @@ def test_simple_call_async_uses_cached_simulation_provider_default_model(monkeyp
     assert provider.simple_call_async.await_args.kwargs["model"] == "gpt-5"
 
 
-def test_simple_call_async_uses_cached_simulation_provider_for_explicit_model(monkeypatch):
+def test_simple_call_async_uses_cached_simulation_provider_for_explicit_model(
+    monkeypatch,
+):
     provider = MagicMock()
     provider.simple_call_async = AsyncMock(return_value=({"ok": True}, TokenUsage()))
     config = SimpleNamespace(resolve_sim_strong=lambda: "openai/gpt-5", providers={})
@@ -49,7 +53,11 @@ def test_simple_call_async_uses_cached_simulation_provider_for_explicit_model(mo
     monkeypatch.setattr(
         llm,
         "get_provider",
-        Mock(side_effect=AssertionError("explicit async model should still use simulation cache")),
+        Mock(
+            side_effect=AssertionError(
+                "explicit async model should still use simulation cache"
+            )
+        ),
     )
 
     asyncio.run(
@@ -63,16 +71,15 @@ def test_simple_call_async_uses_cached_simulation_provider_for_explicit_model(mo
 
     get_sim_provider.assert_called_once_with("anthropic/claude-sonnet-4-5")
     provider.simple_call_async.assert_awaited_once()
-    assert (
-        provider.simple_call_async.await_args.kwargs["model"]
-        == "claude-sonnet-4-5"
-    )
+    assert provider.simple_call_async.await_args.kwargs["model"] == "claude-sonnet-4-5"
 
 
 def test_simple_call_sync_path_still_uses_regular_provider_factory(monkeypatch):
     provider = MagicMock()
     provider.simple_call.return_value = {"ok": True}
-    config = SimpleNamespace(resolve_pipeline_fast=lambda: "openai/gpt-5-mini", providers={})
+    config = SimpleNamespace(
+        resolve_pipeline_fast=lambda: "openai/gpt-5-mini", providers={}
+    )
     get_provider = Mock(return_value=provider)
 
     monkeypatch.setattr(llm, "get_config", lambda: config)
@@ -80,7 +87,11 @@ def test_simple_call_sync_path_still_uses_regular_provider_factory(monkeypatch):
     monkeypatch.setattr(
         llm,
         "get_simulation_provider",
-        Mock(side_effect=AssertionError("sync calls should not use simulation provider cache")),
+        Mock(
+            side_effect=AssertionError(
+                "sync calls should not use simulation provider cache"
+            )
+        ),
     )
 
     result = llm.simple_call(