Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion py/stringflow/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ def chat(
wire_format: str = "messages",
model: str | None = None,
max_tokens: int | None = None,
auth_bearer: str | None = None,
auth_header: str | None = None,
auth_value: str | None = None,
) -> list[Message]:
"""Chat with an LLM. Returns conversation history you can pass back in.

Expand All @@ -35,7 +38,16 @@ def chat(
)

try:
response = _chat_raw(base_url, messages, wire_format, model, max_tokens)
response = _chat_raw(
base_url,
messages,
wire_format,
model,
max_tokens,
auth_bearer,
auth_header,
auth_value,
)
except ConnectionError as e:
raise ConnectionError(
f"cannot reach LLM server at {base_url} — is dkdc-ai running?\n"
Expand Down
92 changes: 92 additions & 0 deletions py/stringflow/test_api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
"""Tests for the stringflow Python API.

E2E tests require a running llama-server on localhost:8080.
Run with: uv run pytest py/stringflow/test_api.py
"""

import pytest

import stringflow as sf


# ============================================================================
# Unit tests (no server required)
# ============================================================================


class TestChatInput:
    """Input-validation tests for chat(); no running server required."""

    def test_string_message_builds_history(self):
        """chat() rejects non-str/non-list input with TypeError.

        The happy path (building a user-message tuple from a string) needs a
        live server, so this only exercises the validation path — with a float,
        so it is distinct from the int case in test_rejects_invalid_type.
        """
        with pytest.raises(TypeError):
            sf.chat(3.14)  # type: ignore

    def test_rejects_invalid_type(self):
        """The TypeError carries the expected explanatory message."""
        with pytest.raises(TypeError, match="must be str or list"):
            sf.chat(123)  # type: ignore

    def test_connection_error_without_server(self):
        """chat() raises ConnectionError when no server is listening.

        The original `(ConnectionError, Exception)` tuple was vacuous — the
        Exception member matched any error, so the test could never fail on a
        wrong exception type. chat() documents re-raising ConnectionError for
        an unreachable server, so pin exactly that.
        """
        with pytest.raises(ConnectionError):
            sf.chat("hi", base_url="http://localhost:19999")


class TestDefaults:
    """Smoke checks on the module's public surface (no server required)."""

    def test_default_url(self):
        expected = "http://localhost:8080"
        assert sf.DEFAULT_URL == expected

    def test_exports(self):
        # The public API: every name the package promises to expose.
        for symbol in ("chat", "health_check", "DEFAULT_URL", "Message"):
            assert hasattr(sf, symbol), f"missing export: {symbol}"


# ============================================================================
# E2E tests (require running llama-server on localhost:8080)
# ============================================================================


@pytest.mark.e2e
class TestChatE2E:
    """End-to-end chat tests; require llama-server on localhost:8080."""

    def test_simple_chat(self):
        prompt = "Reply with exactly the word 'pong' and nothing else."
        history = sf.chat(prompt)
        assert isinstance(history, list)
        assert len(history) == 2
        # First entry is the user turn we sent, verbatim.
        assert history[0] == ("user", prompt)
        # Second entry is a non-empty assistant reply.
        assert history[1][0] == "assistant"
        assert len(history[1][1]) > 0

    def test_multi_turn(self):
        first = sf.chat("My name is TestBot.")
        assert len(first) == 2
        # Feed the prior history back in; two more turns are appended.
        second = sf.chat("What is my name?", first)
        assert len(second) == 4
        assert second[2] == ("user", "What is my name?")
        assert second[3][0] == "assistant"

    def test_message_list_input(self):
        history = sf.chat([("user", "Reply with exactly 'hello'.")])
        assert len(history) == 2
        assert history[1][0] == "assistant"

    def test_wire_format_completions(self):
        history = sf.chat("Say hi.", wire_format="completions")
        assert len(history) == 2
        assert history[1][0] == "assistant"

    def test_wire_format_responses(self):
        history = sf.chat("Say hi.", wire_format="responses")
        assert len(history) == 2
        assert history[1][0] == "assistant"


@pytest.mark.e2e
class TestHealthCheckE2E:
    """End-to-end health check; requires llama-server on localhost:8080."""

    def test_health_check(self):
        status = sf.health_check()
        assert isinstance(status, str)
4 changes: 4 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ python-packages = ["stringflow"]
python-source = "py"
manifest-path = "crates/stringflow-py/Cargo.toml"

# Register the custom "e2e" marker (avoids PytestUnknownMarkWarning) and
# exclude those tests by default since they need a live llama-server.
# Run them explicitly with: pytest -m e2e
[tool.pytest.ini_options]
markers = ["e2e: end-to-end tests requiring a running llama-server"]
addopts = "-m 'not e2e'"

[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
Expand Down
Loading