From 7499abd0d9b2d8a7205e63eeeb361d23445de865 Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Thu, 20 Nov 2025 16:20:08 -0500
Subject: [PATCH 1/3] fix: add details to litellm init for model_id

---
 mellea/backends/litellm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mellea/backends/litellm.py b/mellea/backends/litellm.py
index e7b9b9c8..555431c5 100644
--- a/mellea/backends/litellm.py
+++ b/mellea/backends/litellm.py
@@ -54,12 +54,12 @@ def __init__(
         base_url: str | None = "http://localhost:11434",
         model_options: dict | None = None,
     ):
-        """Initialize and OpenAI compatible backend. For any additional kwargs that you need to pass the the client, pass them as a part of **kwargs.
+        """Initialize an OpenAI compatible backend using the [LiteLLM Python SDK](https://docs.litellm.ai/docs/#litellm-python-sdk).
 
         Note: If getting `Unclosed client session`, set `export DISABLE_AIOHTTP_TRANSPORT=True` in your environment. See: https://github.com/BerriAI/litellm/issues/13251.
 
         Args:
-            model_id : The LiteLLM model identifier. Make sure that all necessary credentials are in OS environment variables.
+            model_id : The LiteLLM model identifier; in most cases requires some combination of `<provider>/<model_name>`. Make sure that all necessary credentials are in OS environment variables.
             formatter: A custom formatter based on backend.If None, defaults to TemplateFormatter
             base_url : Base url for LLM API. Defaults to None.
             model_options : Generation options to pass to the LLM. Defaults to None.

From 262182598429d0867dc851b76d2a43f91f2d0c28 Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Thu, 20 Nov 2025 16:32:01 -0500
Subject: [PATCH 2/3] fix: litellm ollama test options

---
 test/backends/test_litellm_ollama.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/test/backends/test_litellm_ollama.py b/test/backends/test_litellm_ollama.py
index f6f10807..1422eb78 100644
--- a/test/backends/test_litellm_ollama.py
+++ b/test/backends/test_litellm_ollama.py
@@ -111,12 +111,6 @@ def test_litellm_ollama_instruct_options(session):
         ModelOption.SEED: 123,
         ModelOption.TEMPERATURE: 0.5,
         ModelOption.MAX_NEW_TOKENS: 100,
-
-        # Ollama thinking controls currently broken on Granite; see
-        # https://github.com/ollama/ollama/issues/10983
-        # TODO: Re-enable when this upstream bug gets fixed.
-        #ModelOption.THINKING: True,
-        #"reasoning_effort": True,
         "homer_simpson": "option should be kicked out",
     }
 

From fcb51370d985e4fc4d1c00eebfa5141b277d27e3 Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Thu, 20 Nov 2025 17:13:29 -0500
Subject: [PATCH 3/3] fix: remove LLMaJ template; uses Requirement template

---
 .../prompts/default/LLMaJRequirement.jinja2   | 15 ---------------
 test/stdlib_basics/test_requirement.py        | 12 +++++++++++-
 2 files changed, 11 insertions(+), 16 deletions(-)
 delete mode 100644 mellea/templates/prompts/default/LLMaJRequirement.jinja2

diff --git a/mellea/templates/prompts/default/LLMaJRequirement.jinja2 b/mellea/templates/prompts/default/LLMaJRequirement.jinja2
deleted file mode 100644
index 21c895ad..00000000
--- a/mellea/templates/prompts/default/LLMaJRequirement.jinja2
+++ /dev/null
@@ -1,15 +0,0 @@
-Please check the following requirement against the following output.
-Reply with 'yes' if the requirement is satisfied and 'no' otherwise.
-Do not include any other text in your response.
-
-{%- block output -%}
-{% if output %}
- Output: {{ output -}}
-{%- endif -%}
-{% endblock output%}
-
-{%- block description -%}
-{% if description %}
- Requirement: {{ description -}}
-{%- endif -%}
-{% endblock description %}

diff --git a/test/stdlib_basics/test_requirement.py b/test/stdlib_basics/test_requirement.py
index b836664a..9502821c 100644
--- a/test/stdlib_basics/test_requirement.py
+++ b/test/stdlib_basics/test_requirement.py
@@ -1,7 +1,7 @@
 import asyncio
 import pytest
 from mellea.stdlib.base import ChatContext, ModelOutputThunk
-from mellea.stdlib.requirement import Requirement, simple_validate
+from mellea.stdlib.requirement import LLMaJRequirement, Requirement, simple_validate
 from mellea.stdlib.session import start_session
 
 ctx = ChatContext()
@@ -18,6 +18,16 @@ async def test_llmaj_validation_req_output_field():
         "requirement's output shouldn't be updated during/after validation"
     )
 
+async def test_llmaj_requirement_uses_requirement_template():
+    m = start_session(ctx=ctx)
+    req = LLMaJRequirement("Must output test.")
+    assert req._output is None
+
+    _ = await req.validate(m.backend, ctx=ctx)
+    assert req._output is None, (
+        "requirement's output shouldn't be updated during/after validation"
+    )
+
 def test_simple_validate_bool():
     validation_func = simple_validate(lambda x: False, reason="static reason")
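
---

For reviewers, a minimal usage sketch of the `model_id` convention that the new docstring in PATCH 1/3 documents. It assumes the class defined in `mellea/backends/litellm.py` is `LiteLLMBackend` and that its constructor matches the signature shown in that hunk; the concrete model ids below are illustrative, not part of the patches.

```python
# Sketch only: LiteLLMBackend and its import path are assumed from the
# file touched in PATCH 1/3; the model ids are examples.
from mellea.backends.litellm import LiteLLMBackend

# LiteLLM identifiers generally combine "<provider>/<model_name>";
# credentials for the chosen provider must already be set as
# environment variables.
local_backend = LiteLLMBackend(
    model_id="ollama/granite3.3:8b",    # local model served by Ollama
    base_url="http://localhost:11434",  # default shown in the signature above
)
hosted_backend = LiteLLMBackend(model_id="openai/gpt-4o-mini")  # hosted provider
```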