From e33e720b26356a794d869c9efbfee05a514c063d Mon Sep 17 00:00:00 2001 From: Cody Date: Fri, 3 Apr 2026 23:06:31 -0400 Subject: [PATCH] feat(py): add auth params to chat() and create Python tests Expose auth_bearer, auth_header, auth_value in the high-level chat() wrapper to match _chat_raw(). Add test_api.py with unit tests and e2e tests (marked, skipped by default). Co-Authored-By: Claude Opus 4.6 (1M context) --- py/stringflow/__init__.py | 14 +++++- py/stringflow/test_api.py | 92 +++++++++++++++++++++++++++++++++++++++ pyproject.toml | 4 ++ 3 files changed, 109 insertions(+), 1 deletion(-) create mode 100644 py/stringflow/test_api.py diff --git a/py/stringflow/__init__.py b/py/stringflow/__init__.py index 412e968..a140edf 100644 --- a/py/stringflow/__init__.py +++ b/py/stringflow/__init__.py @@ -16,6 +16,9 @@ def chat( wire_format: str = "messages", model: str | None = None, max_tokens: int | None = None, + auth_bearer: str | None = None, + auth_header: str | None = None, + auth_value: str | None = None, ) -> list[Message]: """Chat with an LLM. Returns conversation history you can pass back in. @@ -35,7 +38,16 @@ def chat( ) try: - response = _chat_raw(base_url, messages, wire_format, model, max_tokens) + response = _chat_raw( + base_url, + messages, + wire_format, + model, + max_tokens, + auth_bearer, + auth_header, + auth_value, + ) except ConnectionError as e: raise ConnectionError( f"cannot reach LLM server at {base_url} — is dkdc-ai running?\n" diff --git a/py/stringflow/test_api.py b/py/stringflow/test_api.py new file mode 100644 index 0000000..1992bf1 --- /dev/null +++ b/py/stringflow/test_api.py @@ -0,0 +1,92 @@ +"""Tests for the stringflow Python API. + +E2E tests require a running llama-server on localhost:8080. 
+Run with: uv run pytest py/stringflow/test_api.py
+"""
+
+import pytest
+
+import stringflow as sf
+
+
+# ============================================================================
+# Unit tests (no server required)
+# ============================================================================
+
+
+class TestChatInput:
+    def test_string_message_builds_history(self):
+        """chat() should accept a string and build a user message tuple."""
+        # We can't call chat() without a server, but we can test the TypeError path
+        with pytest.raises(TypeError):
+            sf.chat(42)  # type: ignore
+
+    def test_rejects_invalid_type(self):
+        with pytest.raises(TypeError, match="must be str or list"):
+            sf.chat(123)  # type: ignore
+
+    def test_connection_error_without_server(self):
+        """chat() should raise ConnectionError when no server is running."""
+        with pytest.raises((ConnectionError, Exception)):
+            sf.chat("hi", base_url="http://localhost:19999")
+
+
+class TestDefaults:
+    def test_default_url(self):
+        assert sf.DEFAULT_URL == "http://localhost:8080"
+
+    def test_exports(self):
+        assert hasattr(sf, "chat")
+        assert hasattr(sf, "health_check")
+        assert hasattr(sf, "DEFAULT_URL")
+        assert hasattr(sf, "Message")
+
+
+# ============================================================================
+# E2E tests (require running llama-server on localhost:8080)
+# ============================================================================
+
+
+@pytest.mark.e2e
+class TestChatE2E:
+    def test_simple_chat(self):
+        result = sf.chat("Reply with exactly the word 'pong' and nothing else.")
+        assert isinstance(result, list)
+        assert len(result) == 2
+        assert result[0] == (
+            "user",
+            "Reply with exactly the word 'pong' and nothing else.",
+        )
+        assert result[1][0] == "assistant"
+        assert len(result[1][1]) > 0
+
+    def test_multi_turn(self):
+        r1 = sf.chat("My name is TestBot.")
+        assert len(r1) == 2
+        r2 = sf.chat("What is my name?", r1)
+        assert len(r2) == 4
+        assert r2[2] == ("user", "What is my name?")
+        assert r2[3][0] == "assistant"
+
+    def test_message_list_input(self):
+        messages = [("user", "Reply with exactly 'hello'.")]
+        result = sf.chat(messages)
+        assert len(result) == 2
+        assert result[1][0] == "assistant"
+
+    def test_wire_format_completions(self):
+        result = sf.chat("Say hi.", wire_format="completions")
+        assert len(result) == 2
+        assert result[1][0] == "assistant"
+
+    def test_wire_format_responses(self):
+        result = sf.chat("Say hi.", wire_format="responses")
+        assert len(result) == 2
+        assert result[1][0] == "assistant"
+
+
+@pytest.mark.e2e
+class TestHealthCheckE2E:
+    def test_health_check(self):
+        result = sf.health_check()
+        assert isinstance(result, str)
diff --git a/pyproject.toml b/pyproject.toml
index a4c266e..93e81c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,10 @@ python-packages = ["stringflow"]
 python-source = "py"
 manifest-path = "crates/stringflow-py/Cargo.toml"
 
+[tool.pytest.ini_options]
+markers = ["e2e: end-to-end tests requiring a running llama-server"]
+addopts = "-m 'not e2e'"
+
 [build-system]
 requires = ["maturin>=1.0,<2.0"]
 build-backend = "maturin"