4 changes: 2 additions & 2 deletions mellea/backends/litellm.py
@@ -54,12 +54,12 @@ def __init__(
base_url: str | None = "http://localhost:11434",
model_options: dict | None = None,
):
"""Initialize and OpenAI compatible backend. For any additional kwargs that you need to pass the the client, pass them as a part of **kwargs.
"""Initialize an OpenAI compatible backend using the [LiteLLM Python SDK](https://docs.litellm.ai/docs/#litellm-python-sdk).

Note: If getting `Unclosed client session`, set `export DISABLE_AIOHTTP_TRANSPORT=True` in your environment. See: https://github.com/BerriAI/litellm/issues/13251.

Args:
-            model_id : The LiteLLM model identifier. Make sure that all necessary credentials are in OS environment variables.
+            model_id : The LiteLLM model identifier; in most cases requires some combination of `<provider>/<model_creator>/<model_name>`. Make sure that all necessary credentials are in OS environment variables.
            formatter: A custom formatter based on backend. If None, defaults to TemplateFormatter
base_url : Base url for LLM API. Defaults to None.
model_options : Generation options to pass to the LLM. Defaults to None.
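For context, here is a minimal usage sketch of the backend documented above. The class name `LiteLLMBackend`, its import path, and the example model identifier are assumptions inferred from the file path; only the constructor arguments and the `DISABLE_AIOHTTP_TRANSPORT` workaround come from the docstring in this diff.

```python
import os

# Workaround mentioned in the docstring above: avoids `Unclosed client session`
# warnings from LiteLLM's aiohttp transport (see litellm issue #13251).
os.environ["DISABLE_AIOHTTP_TRANSPORT"] = "True"

# Assumed import path and class name, inferred from mellea/backends/litellm.py.
from mellea.backends.litellm import LiteLLMBackend

# model_id follows LiteLLM's `<provider>/<model_creator>/<model_name>` style;
# credentials for the chosen provider must already be set as environment variables.
backend = LiteLLMBackend(
    model_id="ollama/granite3.3:8b",       # example identifier, adjust to your provider
    base_url="http://localhost:11434",     # default shown in the signature above
    model_options={"temperature": 0.5},    # generation options forwarded to LiteLLM
)
```

Because LiteLLM routes on the `model_id` prefix, switching providers should only require changing that string (plus the matching credentials).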
15 changes: 0 additions & 15 deletions mellea/templates/prompts/default/LLMaJRequirement.jinja2

This file was deleted.

6 changes: 0 additions & 6 deletions test/backends/test_litellm_ollama.py
@@ -111,12 +111,6 @@ def test_litellm_ollama_instruct_options(session):
ModelOption.SEED: 123,
ModelOption.TEMPERATURE: 0.5,
ModelOption.MAX_NEW_TOKENS: 100,

-        # Ollama thinking controls currently broken on Granite; see
-        # https://github.com/ollama/ollama/issues/10983
-        # TODO: Re-enable when this upstream bug gets fixed.
-        #ModelOption.THINKING: True,
-        #"reasoning_effort": True,
"homer_simpson": "option should be kicked out",
}

12 changes: 11 additions & 1 deletion test/stdlib_basics/test_requirement.py
@@ -1,7 +1,7 @@
import asyncio
import pytest
from mellea.stdlib.base import ChatContext, ModelOutputThunk
-from mellea.stdlib.requirement import Requirement, simple_validate
+from mellea.stdlib.requirement import LLMaJRequirement, Requirement, simple_validate
from mellea.stdlib.session import start_session

ctx = ChatContext()
@@ -18,6 +18,16 @@ async def test_llmaj_validation_req_output_field():
"requirement's output shouldn't be updated during/after validation"
)

+async def test_llmaj_requirement_uses_requirement_template():
+    m = start_session(ctx=ctx)
+    req = LLMaJRequirement("Must output test.")
+    assert req._output is None
+
+    _ = await req.validate(m.backend, ctx=ctx)
+    assert req._output is None, (
+        "requirement's output shouldn't be updated during/after validation"
+    )


def test_simple_validate_bool():
validation_func = simple_validate(lambda x: False, reason="static reason")