From d4b1e0dfee11ab3a62ed78d05e7ce9bfeb809131 Mon Sep 17 00:00:00 2001
From: Newcoderorigin
Date: Sat, 4 Oct 2025 16:49:10 -0500
Subject: [PATCH] Add LM Studio integration, guards, and smoke checks

---
 .github/workflows/smoke.yml       |  22 +++
 configs/ui.yml                    |  10 ++
 scripts/smoke.sh                  |   8 +
 tests/gui/test_live_tab_wiring.py | 168 ++++++++++++++++-
 tests/test_config_schema.py       |  26 +++
 tests/test_lmstudio_client.py     | 230 ++++++++++--------------
 tests/test_runtime_guard.py       |  34 ++++
 toptek/core/ui_config.py          | 104 +++++++++++
 toptek/filters.py                 |  53 ++++++
 toptek/gui/__init__.py            |   7 +
 toptek/gui/app.py                 |   9 +
 toptek/gui/live_tab.py            | 290 ++++++++++++++++++++++++++++++
 toptek/lmstudio.py                | 175 ++++++++++++++++++
 toptek/runtime_guard.py           |  24 +++
 toptek/ui/live_tab.py             |   7 +
 15 files changed, 1024 insertions(+), 143 deletions(-)
 create mode 100644 .github/workflows/smoke.yml
 create mode 100755 scripts/smoke.sh
 create mode 100644 tests/test_config_schema.py
 create mode 100644 tests/test_runtime_guard.py
 create mode 100644 toptek/filters.py
 create mode 100644 toptek/gui/live_tab.py
 create mode 100644 toptek/lmstudio.py
 create mode 100644 toptek/runtime_guard.py
 create mode 100644 toptek/ui/live_tab.py

diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml
new file mode 100644
index 0000000..cdfd65e
--- /dev/null
+++ b/.github/workflows/smoke.yml
@@ -0,0 +1,22 @@
+name: Smoke
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  smoke:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -r toptek/requirements-lite.txt
+      - name: Run smoke checks
+        run: ./scripts/smoke.sh
diff --git a/configs/ui.yml b/configs/ui.yml
index 7ee6b90..cd1118c 100644
--- a/configs/ui.yml
+++ b/configs/ui.yml
@@ -14,6 +14,16 @@ chart:
   fps: 12
   max_points: 180
   price_decimals: 2
+lmstudio:
+  enabled: true
+  base_url: "http://localhost:1234/v1"
+  api_key: "lm-studio"
+  model: "llama-3.1-8b-instruct"
+  system_prompt: "You are the Autostealth Evolution assistant. Follow V10 ZERO-CONS."
+  max_tokens: 512
+  temperature: 0.0
+  top_p: 1.0
+  timeout_s: 30
 status:
   login:
     idle: "Awaiting verification"
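Usage sketch (not part of the diff): how the new `lmstudio` block is consumed end to end, using only APIs introduced in this patch; the composition itself is illustrative.

    import os
    from pathlib import Path

    from toptek.core import ui_config
    from toptek.lmstudio import LMStudioClient

    # load_ui_config reads the YAML and applies TOPTEK_LMSTUDIO_* overrides from env.
    cfg = ui_config.load_ui_config(Path("configs/ui.yml"), env=os.environ)

    if cfg.lmstudio.enabled:
        # LMStudioClient accepts a plain mapping, so the as_dict() section is enough.
        client = LMStudioClient(cfg.as_dict()["lmstudio"])
        if client.health():
            print([model.model_id for model in client.list_models()])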
diff --git a/scripts/smoke.sh b/scripts/smoke.sh
new file mode 100755
index 0000000..4a6a67e
--- /dev/null
+++ b/scripts/smoke.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+echo "[smoke] compiling python modules"
+python -m compileall toptek >/dev/null
+
+echo "[smoke] running targeted tests"
+pytest -q tests/test_config_schema.py tests/test_lmstudio_client.py
diff --git a/tests/gui/test_live_tab_wiring.py b/tests/gui/test_live_tab_wiring.py
index b4caa54..28124c4 100644
--- a/tests/gui/test_live_tab_wiring.py
+++ b/tests/gui/test_live_tab_wiring.py
@@ -1,4 +1,10 @@
+"""Behavioural wiring tests for the Live tab widget."""
+
+from __future__ import annotations
+
 import sys
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
 
 import pytest
 
@@ -6,19 +12,165 @@ sys.modules.setdefault("core", core_package)
 tk = pytest.importorskip("tkinter")
+from tkinter import ttk  # noqa: E402
+
+from toptek.core import utils  # noqa: E402
 
 try:  # Prefer dedicated Live tab module when present
     from toptek.gui.live_tab import LiveTab  # type: ignore
-except ModuleNotFoundError:
+except ModuleNotFoundError:  # pragma: no cover - legacy builds without the Live tab
+    from toptek.gui.widgets import LiveTab  # type: ignore  # noqa: F401
+
+
+@pytest.fixture
+def tk_root() -> Any:
     try:
-        from toptek.gui.widgets import LiveTab  # type: ignore
-    except (ModuleNotFoundError, ImportError, AttributeError):
-        LiveTab = None  # type: ignore
+        root = tk.Tk()
+    except tk.TclError as exc:  # pragma: no cover - depends on CI environment
+        pytest.skip(f"Tk unavailable: {exc}")
+    root.withdraw()
+    yield root
+    root.destroy()
+
+
+def _paths(base: Path) -> utils.AppPaths:
+    return utils.AppPaths(root=base, cache=base / "cache", models=base / "models")
+
+
+def _build_tab(
+    root: Any,
+    tmp_path: Path,
+    configs: Dict[str, Dict[str, object]],
+    **kwargs: Any,
+) -> LiveTab:
+    notebook = ttk.Notebook(root)
+    notebook.pack()
+    return LiveTab(notebook, configs, _paths(tmp_path), **kwargs)
+
+
+def test_live_tab_metrics_visibility_toggle(tk_root: Any, tmp_path: Path) -> None:
+    configs: Dict[str, Dict[str, object]] = {"live": {"defaults": {"symbol": "ES"}}}
+    tab = _build_tab(tk_root, tmp_path, configs)
+
+    assert tab.metrics_frame.winfo_manager()
+
+    tab.metrics_visible.set(False)
+    tab._update_metrics_visibility()
+    assert not tab.metrics_frame.winfo_manager()
+
+    tab.metrics_visible.set(True)
+    tab._update_metrics_visibility()
+    assert tab.metrics_frame.winfo_manager()
+
+
+def test_live_tab_compose_request_uses_config_defaults(
+    tk_root: Any, tmp_path: Path
+) -> None:
+    configs: Dict[str, Dict[str, object]] = {
+        "live": {
+            "defaults": {
+                "account_id": "ACC-1",
+                "symbol": "MESU4",
+                "quantity": 3,
+                "order_type": "LIMIT",
+                "time_in_force": "GTC",
+                "route": "LIVE",
+                "limit_price": 4321.0,
+                "stop_price": "",
+            }
+        }
+    }
+    tab = _build_tab(tk_root, tmp_path, configs)
+
+    tab.account_var.set("")
+    tab.symbol_var.set("")
+    tab.quantity_var.set("")
+    tab.order_type_var.set("")
+    tab.tif_var.set("")
+    tab.route_var.set("")
+    tab.limit_var.set("")
+    tab.stop_var.set("")
+
+    request = tab.compose_request()
+
+    assert request["account_id"] == "ACC-1"
+    assert request["symbol"] == "MESU4"
+    assert request["quantity"] == 3
+    assert request["order_type"] == "LIMIT"
+    assert request["time_in_force"] == "GTC"
+    assert request["route"] == "LIVE"
+    assert request["limit_price"] == 4321.0
+    assert request["stop_price"] == ""
+    assert configs["live"]["last_request"] == request
+    redacted = configs["live"].get("last_request_redacted")
+    assert redacted
+    assert redacted["symbol"] == "[REDACTED_TICKER]"
+    assert redacted["account_id"] == "[REDACTED_TICKER]"
+    assert tab.request_defaults["symbol"] == "MESU4"
+
+
+def test_live_tab_submit_order_handles_success_and_error(
+    tk_root: Any, tmp_path: Path
+) -> None:
+    success_events: List[Tuple[Dict[str, Any], Dict[str, Any]]] = []
+    error_events: List[Tuple[Dict[str, Any], Exception]] = []
+
+    class RecordingClient:
+        def __init__(self, response: Any) -> None:
+            self.response = response
+            self.calls: List[Dict[str, Any]] = []
+
+        def place_order(self, payload: Dict[str, Any]) -> Dict[str, Any]:
+            self.calls.append(payload)
+            if isinstance(self.response, Exception):
+                raise self.response
+            return self.response
+
+    configs: Dict[str, Dict[str, object]] = {"live": {}}
+    client = RecordingClient({"status": "ACCEPTED", "id": "123"})
+    tab = _build_tab(tk_root, tmp_path, configs, client=client)
+    tab.register_callbacks(
+        on_success=lambda payload, response: success_events.append((payload, response)),
+        on_error=lambda payload, exc: error_events.append((payload, exc)),
+    )
+
+    response = tab.submit_order()
+    assert response == {"status": "ACCEPTED", "id": "123"}
+    assert len(client.calls) == 1
+    assert success_events and success_events[0][1]["id"] == "123"
+    assert not error_events
+    assert tab.metrics_state["orders_sent"] == 1
+    assert tab.status_var.get().startswith("Order ACCEPTED")
+
+    failing = RecordingClient(ValueError("route unavailable"))
+    tab.client = failing
+    tab.submit_order()
+    assert error_events and isinstance(error_events[-1][1], ValueError)
+    assert tab.metrics_state["errors"] == 1
+    assert tab.status_var.get().startswith("Error:")
+    metrics = configs["live"].get("metrics")
+    assert isinstance(metrics, dict)
+    assert metrics.get("orders_sent") == 1
+    assert metrics.get("errors") == 1
 
-if LiveTab is None:  # pragma: no cover - legacy builds without the Live tab
-    pytest.skip("Live tab implementation unavailable", allow_module_level=True)
 
+def test_live_tab_refresh_metrics_uses_fetcher(
+    tk_root: Any, tmp_path: Path
+) -> None:
+    calls: List[int] = []
+
+    def metrics_fetcher() -> Dict[str, Any]:
+        calls.append(1)
+        return {"latency_ms": 42, "fills": 5}
 
-def test_live_tab_placeholder() -> None:  # pragma: no cover - executed when LiveTab exists
-    pytest.skip("Live tab behaviour tests require the implementation module")
+    configs: Dict[str, Dict[str, object]] = {"live": {}}
+    tab = _build_tab(tk_root, tmp_path, configs, metrics_fetcher=metrics_fetcher)
+    tab.metrics_state["orders_sent"] = 7
+    metrics = tab.refresh_metrics()
+    assert calls  # fetcher invoked
+    assert metrics["latency_ms"] == 42
+    assert metrics["fills"] == 5
+    buffer = tab.metrics_output.get("1.0", "end-1c")
+    assert '"latency_ms": 42' in buffer
+    assert configs["live"]["metrics"]["fills"] == 5
diff --git a/tests/test_config_schema.py b/tests/test_config_schema.py
new file mode 100644
index 0000000..8800bac
--- /dev/null
+++ b/tests/test_config_schema.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from toptek.core import ui_config
+
+
+def test_ui_config_includes_lmstudio_defaults(tmp_path: Path) -> None:
+    config_path = tmp_path / "ui.yml"
+    config_path.write_text("{}\n", encoding="utf-8")
+
+    cfg = ui_config.load_ui_config(config_path, env={})
+
+    assert cfg.lmstudio.enabled is True
+    assert cfg.lmstudio.base_url == "http://localhost:1234/v1"
"http://localhost:1234/v1" + assert cfg.lmstudio.model == "llama-3.1-8b-instruct" + assert cfg.lmstudio.max_tokens == 512 + assert cfg.as_dict()["lmstudio"]["temperature"] == 0.0 + + +def test_repository_ui_config_matches_schema() -> None: + project_cfg = ui_config.load_ui_config(Path("configs/ui.yml"), env={}) + lmstudio = project_cfg.lmstudio + assert lmstudio.enabled is True + assert lmstudio.timeout_s == 30 + assert "Autostealth Evolution" in lmstudio.system_prompt diff --git a/tests/test_lmstudio_client.py b/tests/test_lmstudio_client.py index 07fe1c9..e6d1ff1 100644 --- a/tests/test_lmstudio_client.py +++ b/tests/test_lmstudio_client.py @@ -1,161 +1,117 @@ from __future__ import annotations -import asyncio +import json from dataclasses import dataclass -from typing import Any, AsyncIterator, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Tuple import pytest -from toptek.ai_server.config import AISettings -from toptek.ai_server.lmstudio import HTTPError, LMStudioClient +from toptek.lmstudio import HTTPError, LMStudioClient, Model @dataclass -class _FakeResponse: - status_code: int = 200 - json_payload: Dict[str, Any] | None = None +class _StubResponse: + status: int = 200 + body: str | bytes = "{}" lines: Iterable[str] | None = None - def raise_for_status(self) -> None: - if self.status_code >= 400: - raise HTTPError(f"HTTP status {self.status_code}") + def read(self) -> bytes: + if isinstance(self.body, bytes): + return self.body + return self.body.encode("utf-8") - def json(self) -> Dict[str, Any]: - return self.json_payload or {} - - async def aiter_lines(self) -> AsyncIterator[str]: + def iter_lines(self) -> Iterable[str]: for line in list(self.lines or []): - await asyncio.sleep(0) yield line -class _FakeStreamResponse(_FakeResponse): - async def __aenter__(self) -> "_FakeStreamResponse": - return self - - async def __aexit__( - self, - exc_type: Optional[type[BaseException]], - exc: Optional[BaseException], - tb: Optional[Any], - ) -> None: - return None +class _StubTransport: + def __init__(self) -> None: + self.routes: Dict[Tuple[str, str], Any] = {} + self.requests: List[Tuple[str, str, bytes | None, Dict[str, str] | None]] = [] + def add(self, method: str, url: str, response: Any) -> None: + self.routes[(method, url)] = response -class _StubAsyncClient: - def __init__(self) -> None: - self._routes: Dict[tuple[str, str], Any] = {} - self.stream_payloads: List[Dict[str, Any]] = [] - self.closed = False - - def add_get(self, path: str, response: Any) -> None: - self._routes[("GET", path)] = response - - def add_stream(self, path: str, factory: Any) -> None: - self._routes[("STREAM", path)] = factory - - async def get(self, path: str, *args: Any, **kwargs: Any) -> Any: - key = ("GET", path) - if key not in self._routes: - raise AssertionError(f"Unexpected GET {path}") - result = self._routes[key] - if isinstance(result, Exception): - raise result - return result - - def stream( + def request( self, method: str, - path: str, + url: str, *, - json: Dict[str, Any], - timeout: Any, - ) -> _FakeStreamResponse: - key = ("STREAM", path) - if key not in self._routes: - raise AssertionError(f"Unexpected stream {method} {path}") - factory = self._routes[key] - if isinstance(factory, Exception): - raise factory - self.stream_payloads.append(json) - response = factory() - if not isinstance(response, _FakeStreamResponse): - raise AssertionError("Stream factory must return _FakeStreamResponse") + data: bytes | None = None, + headers: Dict[str, str] | None = 
+        timeout: float = 0.0,
+    ) -> _StubResponse:
+        self.requests.append((method, url, data, headers))
+        key = (method, url)
+        if key not in self.routes:
+            raise AssertionError(f"Unexpected request {method} {url}")
+        response = self.routes[key]
+        if isinstance(response, Exception):
+            raise response
         return response
 
-    async def aclose(self) -> None:
-        self.closed = True
-
 
-def _settings() -> AISettings:
-    return AISettings(
-        base_url="http://localhost:1234/v1",
-        port=1234,
-        auto_start=False,
-        poll_interval_seconds=0.1,
-        poll_timeout_seconds=1.0,
-        default_model="stable",
-        default_role="system",
-    )
+def _settings() -> Dict[str, Any]:
+    return {
+        "base_url": "http://localhost:1234/v1",
+        "api_key": "lm-studio",
+        "timeout_s": 30,
+    }
 
 
 def test_list_models_success() -> None:
-    stub = _StubAsyncClient()
-    stub.add_get(
-        "/models",
-        _FakeResponse(
-            status_code=200,
-            json_payload={
-                "data": [
-                    {
-                        "id": "model-a",
-                        "owned_by": "local",
-                        "metadata": {"context_length": 8192, "display_name": "A"},
-                        "capabilities": {"tool_calls": True},
-                    },
-                    {
-                        "id": "model-b",
-                        "metadata": {"context_window": 4096},
-                        "performance": {"tokens_per_second": 40.5, "ttft": 120},
-                    },
-                ]
-            },
+    transport = _StubTransport()
+    url = "http://localhost:1234/v1/models"
+    transport.add(
+        "GET",
+        url,
+        _StubResponse(
+            status=200,
+            body=json.dumps(
+                {
+                    "data": [
+                        {
+                            "id": "model-a",
+                            "owned_by": "local",
+                            "metadata": {"context_length": 8192, "display_name": "Alpha"},
+                        },
+                        {
+                            "id": "model-b",
+                            "metadata": {"context_window": 4096},
+                            "description": "Beta",
+                        },
+                    ]
+                }
+            ),
         ),
     )
 
-    async def _run() -> None:
-        client = LMStudioClient(_settings(), client=stub)
-        models = await client.list_models()
-
-        assert [model.model_id for model in models] == ["model-a", "model-b"]
-        assert models[0].supports_tools is True
-        assert models[0].max_context == 8192
-        assert models[1].tokens_per_second == pytest.approx(40.5)
-        assert models[1].ttft == pytest.approx(120.0)
+    client = LMStudioClient(_settings(), transport=transport)
+    models = client.list_models()
 
-    asyncio.run(_run())
+    assert isinstance(models[0], Model)
+    assert [model.model_id for model in models] == ["model-a", "model-b"]
+    assert models[0].max_context == 8192
+    assert models[0].description == "Alpha"
+    assert models[1].max_context == 4096
 
 
 def test_list_models_http_error() -> None:
-    stub = _StubAsyncClient()
-    stub.add_get("/models", _FakeResponse(status_code=503))
-
-    async def _run() -> None:
-        client = LMStudioClient(_settings(), client=stub)
-        with pytest.raises(HTTPError):
-            await client.list_models()
+    transport = _StubTransport()
+    transport.add("GET", "http://localhost:1234/v1/models", _StubResponse(status=503))
 
-    asyncio.run(_run())
+    client = LMStudioClient(_settings(), transport=transport)
+    with pytest.raises(HTTPError):
+        client.list_models()
 
 
-def test_health_handles_timeout() -> None:
-    stub = _StubAsyncClient()
-    stub.add_get("/models", HTTPError("timeout"))
-
-    async def _run() -> None:
-        client = LMStudioClient(_settings(), client=stub)
-        healthy = await client.health()
-        assert healthy is False
+def test_health_handles_failure() -> None:
+    transport = _StubTransport()
+    transport.add("GET", "http://localhost:1234/v1/models", HTTPError("timeout"))
 
-    asyncio.run(_run())
+    client = LMStudioClient(_settings(), transport=transport)
+    assert client.health() is False
 
 
 def test_chat_stream_temperature_zero_is_deterministic() -> None:
@@ -164,23 +120,27 @@ def test_chat_stream_temperature_zero_is_deterministic() -> None:
         "",
         "data: {\"choices\":[{\"delta\":{\"content\":\"!\"}}]}",
{\"choices\":[{\"delta\":{\"content\":\"!\"}}]}", ] + transport = _StubTransport() + transport.add( + "POST", + "http://localhost:1234/v1/chat/completions", + _StubResponse(status=200, lines=lines), + ) - def factory() -> _FakeStreamResponse: - return _FakeStreamResponse(status_code=200, lines=lines) - - async def _run() -> None: - stub = _StubAsyncClient() - stub.add_stream("/chat/completions", factory) - - client = LMStudioClient(_settings(), client=stub) - payload = {"model": "model-a", "temperature": 0.0, "messages": []} + client = LMStudioClient(_settings(), transport=transport) + payload = {"model": "model-a", "temperature": 0.0, "messages": []} - first_run = [chunk async for chunk in client.chat_stream(payload)] - second_run = [chunk async for chunk in client.chat_stream(payload)] + first_run = list(client.chat_stream(payload)) + second_run = list(client.chat_stream(payload)) - assert first_run == [lines[0], lines[2]] - assert second_run == first_run - assert all(item["temperature"] == 0.0 for item in stub.stream_payloads) + assert first_run == [lines[0], lines[2]] + assert second_run == first_run - asyncio.run(_run()) + request_bodies = [ + json.loads(body.decode("utf-8")) + for _, _, body, _ in transport.requests + if body is not None + ] + for body in request_bodies: + assert body["temperature"] == 0.0 diff --git a/tests/test_runtime_guard.py b/tests/test_runtime_guard.py new file mode 100644 index 0000000..5837b24 --- /dev/null +++ b/tests/test_runtime_guard.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +import warnings +from types import SimpleNamespace + +import pytest + +from toptek import runtime_guard + + +def test_warn_if_unsupported_emits_warning(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + runtime_guard, + "sys", + SimpleNamespace( + version_info=SimpleNamespace(major=3, minor=12, micro=0, releaselevel="final", serial=0) + ), + ) + with pytest.warns(RuntimeWarning): + runtime_guard.warn_if_unsupported() + + +def test_warn_if_unsupported_noop_for_supported(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr( + runtime_guard, + "sys", + SimpleNamespace( + version_info=SimpleNamespace(major=3, minor=11, micro=9, releaselevel="final", serial=0) + ), + ) + with warnings.catch_warnings(record=True) as captured: + warnings.simplefilter("error") + runtime_guard.warn_if_unsupported() + assert not captured diff --git a/toptek/core/ui_config.py b/toptek/core/ui_config.py index 73bd8c7..50b24c8 100644 --- a/toptek/core/ui_config.py +++ b/toptek/core/ui_config.py @@ -38,6 +38,16 @@ def _coerce_int(value: Any, field_name: str, *, minimum: int | None = None) -> i return coerced +def _coerce_float(value: Any, field_name: str, *, minimum: float | None = None) -> float: + try: + coerced = float(value) + except (TypeError, ValueError) as exc: # pragma: no cover - defensive + raise ValueError(f"{field_name} must be a float") from exc + if minimum is not None and coerced < minimum: + raise ValueError(f"{field_name} must be >= {minimum}") + return coerced + + @dataclass(frozen=True) class ShellSettings: """Configuration for CLI shell defaults.""" @@ -155,6 +165,92 @@ def apply_environment(self, env: Mapping[str, str]) -> "ChartSettings": return replace(self, **updates) if updates else self +@dataclass(frozen=True) +class LMStudioSettings: + """Configuration for the local LM Studio API bridge.""" + + enabled: bool = True + base_url: str = "http://localhost:1234/v1" + api_key: str = "lm-studio" + model: str = "llama-3.1-8b-instruct" + system_prompt: 
+        "You are the Autostealth Evolution assistant. Follow V10 ZERO-CONS."
+    )
+    max_tokens: int = 512
+    temperature: float = 0.0
+    top_p: float = 1.0
+    timeout_s: int = 30
+
+    @classmethod
+    def from_mapping(cls, data: Mapping[str, Any]) -> "LMStudioSettings":
+        enabled_raw = data.get("enabled", cls.enabled)
+        if isinstance(enabled_raw, str):
+            enabled_value = _coerce_bool(enabled_raw, "lmstudio.enabled")
+        elif isinstance(enabled_raw, bool):
+            enabled_value = enabled_raw
+        else:
+            enabled_value = bool(enabled_raw)
+        return cls(
+            enabled=enabled_value,
+            base_url=_coerce_str(data.get("base_url", cls.base_url), "lmstudio.base_url"),
+            api_key=_coerce_str(data.get("api_key", cls.api_key), "lmstudio.api_key"),
+            model=_coerce_str(data.get("model", cls.model), "lmstudio.model"),
+            system_prompt=_coerce_str(
+                data.get("system_prompt", cls.system_prompt), "lmstudio.system_prompt"
+            ),
+            max_tokens=_coerce_int(
+                data.get("max_tokens", cls.max_tokens), "lmstudio.max_tokens", minimum=1
+            ),
+            temperature=_coerce_float(
+                data.get("temperature", cls.temperature),
+                "lmstudio.temperature",
+                minimum=0.0,
+            ),
+            top_p=_coerce_float(
+                data.get("top_p", cls.top_p), "lmstudio.top_p", minimum=0.0
+            ),
+            timeout_s=_coerce_int(
+                data.get("timeout_s", cls.timeout_s), "lmstudio.timeout_s", minimum=1
+            ),
+        )
+
+    def apply_environment(self, env: Mapping[str, str]) -> "LMStudioSettings":
+        updates: Dict[str, Any] = {}
+        if env.get("TOPTEK_LMSTUDIO_ENABLED"):
+            updates["enabled"] = _coerce_bool(
+                env["TOPTEK_LMSTUDIO_ENABLED"], "env.TOPTEK_LMSTUDIO_ENABLED"
+            )
+        if env.get("TOPTEK_LMSTUDIO_BASE_URL"):
+            updates["base_url"] = env["TOPTEK_LMSTUDIO_BASE_URL"]
+        if env.get("TOPTEK_LMSTUDIO_API_KEY"):
+            updates["api_key"] = env["TOPTEK_LMSTUDIO_API_KEY"]
+        if env.get("TOPTEK_LMSTUDIO_MODEL"):
+            updates["model"] = env["TOPTEK_LMSTUDIO_MODEL"]
+        if env.get("TOPTEK_LMSTUDIO_MAX_TOKENS"):
+            updates["max_tokens"] = _coerce_int(
+                env["TOPTEK_LMSTUDIO_MAX_TOKENS"],
+                "env.TOPTEK_LMSTUDIO_MAX_TOKENS",
+                minimum=1,
+            )
+        if env.get("TOPTEK_LMSTUDIO_TEMPERATURE"):
+            updates["temperature"] = _coerce_float(
+                env["TOPTEK_LMSTUDIO_TEMPERATURE"],
+                "env.TOPTEK_LMSTUDIO_TEMPERATURE",
+                minimum=0.0,
+            )
+        if env.get("TOPTEK_LMSTUDIO_TOP_P"):
+            updates["top_p"] = _coerce_float(
+                env["TOPTEK_LMSTUDIO_TOP_P"], "env.TOPTEK_LMSTUDIO_TOP_P", minimum=0.0
+            )
+        if env.get("TOPTEK_LMSTUDIO_TIMEOUT"):
+            updates["timeout_s"] = _coerce_int(
+                env["TOPTEK_LMSTUDIO_TIMEOUT"],
+                "env.TOPTEK_LMSTUDIO_TIMEOUT",
+                minimum=1,
+            )
+        return replace(self, **updates) if updates else self
+
+
 @dataclass(frozen=True)
 class AppearanceSettings:
     """High-level UI theming choices."""
@@ -298,6 +394,7 @@ class UIConfig:
     appearance: AppearanceSettings = field(default_factory=AppearanceSettings)
     shell: ShellSettings = field(default_factory=ShellSettings)
     chart: ChartSettings = field(default_factory=ChartSettings)
+    lmstudio: LMStudioSettings = field(default_factory=LMStudioSettings)
     status: StatusMessages = field(default_factory=StatusMessages)
 
     @classmethod
@@ -306,6 +403,7 @@ def from_mapping(cls, data: Mapping[str, Any]) -> "UIConfig":
             appearance=AppearanceSettings.from_mapping(data.get("appearance", {})),
             shell=ShellSettings.from_mapping(data.get("shell", {})),
             chart=ChartSettings.from_mapping(data.get("chart", {})),
+            lmstudio=LMStudioSettings.from_mapping(data.get("lmstudio", {})),
             status=StatusMessages.from_mapping(data.get("status", {})),
         )
 
@@ -315,6 +413,7 @@ def apply_environment(self, env: Mapping[str, str]) -> "UIConfig":
             appearance=self.appearance.apply_environment(env),
             shell=self.shell.apply_environment(env),
             chart=self.chart.apply_environment(env),
+            lmstudio=self.lmstudio.apply_environment(env),
         )
 
     def with_updates(
@@ -323,6 +422,7 @@ def with_updates(
         appearance: Dict[str, Any] | None = None,
         shell: Dict[str, Any] | None = None,
         chart: Dict[str, Any] | None = None,
+        lmstudio: Dict[str, Any] | None = None,
     ) -> "UIConfig":
         """Return a copy of the config with provided section overrides."""
 
@@ -333,6 +433,8 @@ def with_updates(
             updates["shell"] = replace(self.shell, **shell)
         if chart:
             updates["chart"] = replace(self.chart, **chart)
+        if lmstudio:
+            updates["lmstudio"] = replace(self.lmstudio, **lmstudio)
         return replace(self, **updates) if updates else self
 
     def as_dict(self) -> Dict[str, Any]:
@@ -340,6 +442,7 @@ def as_dict(self) -> Dict[str, Any]:
             "appearance": asdict(self.appearance),
             "shell": asdict(self.shell),
             "chart": asdict(self.chart),
+            "lmstudio": asdict(self.lmstudio),
             "status": asdict(self.status),
         }
 
@@ -357,6 +460,7 @@ def load_ui_config(path: Path, *, env: Mapping[str, str] | None = None) -> UICon
     "AppearanceSettings",
     "ShellSettings",
     "ChartSettings",
+    "LMStudioSettings",
     "ReplayStatus",
     "StatusMessages",
     "UIConfig",
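Precedence sketch (not part of the diff): environment variables override the YAML values via apply_environment, and numeric overrides are validated with the same coercers as file values. Assumes load_ui_config forwards env to apply_environment, as the signature and tests above suggest.

    import os
    from pathlib import Path

    from toptek.core import ui_config

    env = dict(os.environ)
    env["TOPTEK_LMSTUDIO_MODEL"] = "qwen2.5-7b-instruct"  # hypothetical model name
    env["TOPTEK_LMSTUDIO_TIMEOUT"] = "60"                 # coerced via _coerce_int, minimum=1

    cfg = ui_config.load_ui_config(Path("configs/ui.yml"), env=env)
    # cfg.lmstudio.model == "qwen2.5-7b-instruct"; cfg.lmstudio.timeout_s == 60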
diff --git a/toptek/filters.py b/toptek/filters.py
new file mode 100644
index 0000000..e26ed7d
--- /dev/null
+++ b/toptek/filters.py
@@ -0,0 +1,53 @@
+"""Utility filters for scrubbing sensitive text before transmission."""
+
+from __future__ import annotations
+
+import re
+from typing import Any, Dict
+
+_IP_PATTERN = re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b")
+# The lookahead is bounded to the token so an all-caps word is only masked
+# when the token itself contains a digit or "=".
+_TICKER_PATTERN = re.compile(r"\b(?=[A-Z0-9=]*[=0-9])[A-Z0-9=]{1,10}\b")
+
+
+def redact_text(value: str) -> str:
+    """Redact ticker symbols and IPv4 addresses from *value*.
+
+    The implementation intentionally keeps the transformation simple so that
+    redaction is deterministic and easy to reason about for tests.
+    """
+
+    if not value:
+        return value
+    redacted = _IP_PATTERN.sub("[REDACTED_IP]", value)
+    redacted = _TICKER_PATTERN.sub(_mask_ticker, redacted)
+    return redacted
+
+
+def _mask_ticker(match: re.Match[str]) -> str:
+    token = match.group(0)
+    if token.startswith("[REDACTED_"):
+        return token
+    return "[REDACTED_TICKER]"
+
+
+def redact_payload(payload: Dict[str, Any]) -> Dict[str, Any]:
+    """Return a recursively redacted copy of *payload*."""
+
+    def _scrub(value: Any, *, key: str | None = None) -> Any:
+        if isinstance(value, dict):
+            return {
+                child_key: _scrub(child_val, key=child_key)
+                for child_key, child_val in value.items()
+            }
+        if isinstance(value, list):
+            return [_scrub(item, key=key) for item in value]
+        if isinstance(value, str):
+            if key in {"symbol", "account_id", "route"}:
+                return "[REDACTED_TICKER]"
+            return redact_text(value)
+        return value
+
+    return _scrub(dict(payload))
+
+
+__all__ = ["redact_payload", "redact_text"]
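Behaviour sketch (not part of the diff): redact_text masks IPv4 addresses plus any all-caps token that itself contains a digit or "=", while redact_payload additionally forces the symbol/account_id/route keys regardless of content.

    from toptek.filters import redact_payload, redact_text

    print(redact_text("Fill MESU4 via 10.0.0.1"))
    # -> "Fill [REDACTED_TICKER] via [REDACTED_IP]"

    payload = {"symbol": "ES=F", "note": "ping 192.168.1.5", "qty": 2}
    print(redact_payload(payload))
    # -> {"symbol": "[REDACTED_TICKER]", "note": "ping [REDACTED_IP]", "qty": 2}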
diff --git a/toptek/gui/__init__.py b/toptek/gui/__init__.py
index c19be08..db132e2 100644
--- a/toptek/gui/__init__.py
+++ b/toptek/gui/__init__.py
@@ -33,3 +33,10 @@
 }
 
 __all__ = ["DARK_PALETTE", "TEXT_WIDGET_DEFAULTS"]
+
+try:  # Re-export optional Live tab when available
+    from .live_tab import LiveTab  # type: ignore  # noqa: F401
+except ModuleNotFoundError:  # pragma: no cover - legacy deployments
+    LiveTab = None  # type: ignore
+else:
+    __all__.append("LiveTab")
diff --git a/toptek/gui/app.py b/toptek/gui/app.py
index e9252cb..d886688 100644
--- a/toptek/gui/app.py
+++ b/toptek/gui/app.py
@@ -34,6 +34,10 @@ def __init__(
 
     def _build_tabs(self) -> None:
         from . import widgets
+        try:
+            from .live_tab import LiveTab as LiveTradingTab
+        except Exception:  # pragma: no cover - optional dependency guard
+            LiveTradingTab = None
 
         tabs = {
             "Dashboard": (
@@ -65,6 +69,11 @@ def _build_tabs(self) -> None:
                 "Step 6 · Check Topstep guardrails and plan manual execution.",
             ),
         }
+        if LiveTradingTab is not None:
+            tabs["Live"] = (
+                LiveTradingTab,
+                "Step 7 · Engage the LM Studio copilot and dispatch live orders.",
+            )
         for name, (cls, guidance) in tabs.items():
             frame = cls(self, self.configs, self.paths)
             self.add(frame, text=name)
diff --git a/toptek/gui/live_tab.py b/toptek/gui/live_tab.py
new file mode 100644
index 0000000..c348ab4
--- /dev/null
+++ b/toptek/gui/live_tab.py
@@ -0,0 +1,290 @@
+"""Live trading tab wiring for Tkinter GUI."""
+
+from __future__ import annotations
+
+from datetime import datetime, timezone
+from typing import Any, Callable, Dict, Iterable, List
+import tkinter as tk
+from tkinter import ttk
+
+from core.utils import json_dumps
+from toptek import filters
+
+from . import TEXT_WIDGET_DEFAULTS
+
+
+SuccessCallback = Callable[[Dict[str, Any], Dict[str, Any]], None]
+ErrorCallback = Callable[[Dict[str, Any], Exception], None]
+
+
+class LiveTab(ttk.Frame):
+    """Interactive controls for dispatching live trading requests."""
+
+    DEFAULT_REQUEST: Dict[str, Any] = {
+        "account_id": "",
+        "symbol": "",
+        "quantity": 1,
+        "order_type": "MARKET",
+        "time_in_force": "DAY",
+        "route": "SIM",
+        "limit_price": "",
+        "stop_price": "",
+    }
+
+    def __init__(
+        self,
+        master: ttk.Notebook,
+        configs: Dict[str, Dict[str, object]],
+        paths: Any,
+        *,
+        client: Any | None = None,
+        metrics_fetcher: Callable[[], Dict[str, Any]] | None = None,
+    ) -> None:
+        super().__init__(master, style="DashboardBackground.TFrame")
+        self.configs = configs
+        self.paths = paths
+        self.client = client
+        self.metrics_fetcher = metrics_fetcher
+        self._success_handlers: List[SuccessCallback] = []
+        self._error_handlers: List[ErrorCallback] = []
+        live_config = self.configs.setdefault("live", {})
+        defaults = dict(self.DEFAULT_REQUEST)
+        defaults.update(live_config.get("defaults", {}))
+        self.request_defaults: Dict[str, Any] = defaults
+        live_config.setdefault("defaults", dict(self.request_defaults))
+        live_config.setdefault("last_request", None)
+        live_config.setdefault("last_request_redacted", None)
+        live_config.setdefault("metrics", {})
+
+        self.status_var = tk.StringVar(value="Ready to trade")
+        self.metrics_visible = tk.BooleanVar(value=True)
+
+        self.account_var = tk.StringVar(value=str(self.request_defaults["account_id"]))
+        self.symbol_var = tk.StringVar(value=str(self.request_defaults["symbol"]))
+        self.quantity_var = tk.StringVar(value=str(self.request_defaults["quantity"]))
+        self.order_type_var = tk.StringVar(value=str(self.request_defaults["order_type"]))
+        self.tif_var = tk.StringVar(value=str(self.request_defaults["time_in_force"]))
+        self.route_var = tk.StringVar(value=str(self.request_defaults["route"]))
+        self.limit_var = tk.StringVar(value=str(self.request_defaults["limit_price"]))
+        self.stop_var = tk.StringVar(value=str(self.request_defaults["stop_price"]))
+
+        self.metrics_state: Dict[str, Any] = {
+            "orders_sent": 0,
+            "errors": 0,
+            "last_status": self.status_var.get(),
+            "last_refresh": None,
+        }
+
+        self._build_controls()
+        self._build_metrics()
+
+    # ------------------------------------------------------------------ UI --
+    def _build_controls(self) -> None:
+        container = ttk.Frame(self, style="DashboardBackground.TFrame")
+        container.pack(fill=tk.X, padx=16, pady=12)
+
+        grid = ttk.Frame(container, style="DashboardBackground.TFrame")
+        grid.pack(fill=tk.X)
+
+        self._add_field(grid, "Account", self.account_var, row=0, column=0)
+        self._add_field(grid, "Symbol", self.symbol_var, row=0, column=1)
+        self._add_field(grid, "Quantity", self.quantity_var, row=0, column=2)
+        self._add_field(grid, "Order type", self.order_type_var, row=1, column=0)
+        self._add_field(grid, "Time-in-force", self.tif_var, row=1, column=1)
+        self._add_field(grid, "Route", self.route_var, row=1, column=2)
+        self._add_field(grid, "Limit", self.limit_var, row=2, column=0)
+        self._add_field(grid, "Stop", self.stop_var, row=2, column=1)
+
+        grid.grid_columnconfigure(0, weight=1)
+        grid.grid_columnconfigure(1, weight=1)
+        grid.grid_columnconfigure(2, weight=1)
+
+        actions = ttk.Frame(container, style="DashboardBackground.TFrame")
+        actions.pack(fill=tk.X, pady=(12, 0))
+
+        ttk.Button(actions, text="Send order", command=self.submit_order).pack(
+            side=tk.LEFT
+        )
+        ttk.Checkbutton(
+            actions,
+            text="Show metrics",
+            variable=self.metrics_visible,
+            command=self._update_metrics_visibility,
+        ).pack(side=tk.LEFT, padx=(12, 0))
+        ttk.Label(
+            actions,
+            textvariable=self.status_var,
+            style="StatusInfo.TLabel",
+        ).pack(side=tk.RIGHT)
+
+    def _add_field(
+        self,
+        master: ttk.Frame,
+        label: str,
+        variable: tk.StringVar,
+        *,
+        row: int,
+        column: int,
+    ) -> None:
+        ttk.Label(master, text=label).grid(row=row, column=column, sticky=tk.W, padx=4)
+        entry = ttk.Entry(master, textvariable=variable, width=18)
+        entry.grid(row=row, column=column, padx=4, pady=(4, 8), sticky=tk.EW)
+
+    def _build_metrics(self) -> None:
+        self.metrics_frame = ttk.Frame(self, style="DashboardBackground.TFrame")
+        self.metrics_frame.pack(fill=tk.BOTH, expand=True, padx=16, pady=(0, 16))
+
+        self.metrics_output = tk.Text(self.metrics_frame, height=10, wrap="word")
+        self.metrics_output.pack(fill=tk.BOTH, expand=True)
+        self._style_text_widget(self.metrics_output)
+        self.refresh_metrics()
+
+    # --------------------------------------------------------------- Actions --
+    def register_callbacks(
+        self,
+        *,
+        on_success: SuccessCallback | Iterable[SuccessCallback] | None = None,
+        on_error: ErrorCallback | Iterable[ErrorCallback] | None = None,
+    ) -> None:
+        """Register callbacks invoked after request completion."""
+
+        if on_success is not None:
+            self._success_handlers.extend(self._normalise_callbacks(on_success))
+        if on_error is not None:
+            self._error_handlers.extend(self._normalise_callbacks(on_error))
+
+    @staticmethod
+    def _normalise_callbacks(
+        callbacks: SuccessCallback | ErrorCallback | Iterable[Any],
+    ) -> List[Any]:
+        if callable(callbacks):
+            return [callbacks]
+        return [callback for callback in callbacks if callable(callback)]
+
+    def compose_request(self) -> Dict[str, Any]:
+        """Compose an order request from UI state and config defaults."""
+
+        request = {
+            "account_id": self._value_or_default(self.account_var),
+            "symbol": self._value_or_default(self.symbol_var),
+            "quantity": self._int_or_default(self.quantity_var),
+            "order_type": self._value_or_default(self.order_type_var),
+            "time_in_force": self._value_or_default(self.tif_var),
+            "route": self._value_or_default(self.route_var),
+            "limit_price": self._numeric_or_blank(self.limit_var),
+            "stop_price": self._numeric_or_blank(self.stop_var),
+            "timestamp": datetime.now(tz=timezone.utc).isoformat(),
+        }
+        live_section = self.configs.setdefault("live", {})
+        live_section["last_request"] = request
live_section["last_request_redacted"] = filters.redact_payload(request) + return request + + def submit_order(self) -> Dict[str, Any] | None: + """Send the composed request to the live trading client.""" + + payload = self.compose_request() + if self.client is None: + self._handle_error(payload, RuntimeError("Live client unavailable")) + return None + try: + response = self.client.place_order(payload) + except Exception as exc: # pragma: no cover - defensive + self._handle_error(payload, exc) + return None + self._handle_success(payload, response) + return response + + def refresh_metrics(self) -> Dict[str, Any]: + """Refresh the metrics display using the configured fetcher.""" + + if self.metrics_fetcher is not None: + metrics = self.metrics_fetcher() + else: + metrics = dict(self.metrics_state) + metrics.setdefault("orders_sent", self.metrics_state.get("orders_sent", 0)) + metrics.setdefault("errors", self.metrics_state.get("errors", 0)) + metrics["last_status"] = self.status_var.get() + metrics["last_refresh"] = datetime.now(tz=timezone.utc).isoformat() + self.metrics_state.update(metrics) + self.metrics_output.delete("1.0", tk.END) + self.metrics_output.insert("1.0", json_dumps(metrics, indent=2)) + self.metrics_output.see("1.0") + self.configs.setdefault("live", {})["metrics"] = dict(self.metrics_state) + return metrics + + # ------------------------------------------------------------- Callbacks -- + def _handle_success( + self, payload: Dict[str, Any], response: Dict[str, Any] + ) -> None: + self.metrics_state["orders_sent"] = self.metrics_state.get("orders_sent", 0) + 1 + status = response.get("status", "ACCEPTED") + reference = response.get("id") or response.get("order_id") or "n/a" + self.status_var.set(f"Order {status} · Ref {reference}") + self.metrics_state["last_status"] = self.status_var.get() + self.refresh_metrics() + for callback in self._success_handlers: + callback(payload, response) + + def _handle_error(self, payload: Dict[str, Any], error: Exception) -> None: + self.metrics_state["errors"] = self.metrics_state.get("errors", 0) + 1 + self.status_var.set(f"Error: {error}") + self.metrics_state["last_status"] = self.status_var.get() + self.refresh_metrics() + for callback in self._error_handlers: + callback(payload, error) + + # ----------------------------------------------------------- UI helpers -- + def _value_or_default(self, variable: tk.StringVar) -> Any: + value = variable.get().strip() + if value: + return value + name = self._variable_name(variable) + return self.request_defaults.get(name, "") + + def _int_or_default(self, variable: tk.StringVar) -> int: + value = variable.get().strip() + if value: + try: + return int(value) + except ValueError: + pass + name = self._variable_name(variable) + return int(self.request_defaults.get(name, 0) or 0) + + def _numeric_or_blank(self, variable: tk.StringVar) -> Any: + value = variable.get().strip() + if not value: + name = self._variable_name(variable) + return self.request_defaults.get(name, "") + try: + return float(value) + except ValueError: + return value + + def _variable_name(self, variable: tk.StringVar) -> str: + mapping = { + id(self.account_var): "account_id", + id(self.symbol_var): "symbol", + id(self.quantity_var): "quantity", + id(self.order_type_var): "order_type", + id(self.tif_var): "time_in_force", + id(self.route_var): "route", + id(self.limit_var): "limit_price", + id(self.stop_var): "stop_price", + } + return mapping.get(id(variable), "") + + def _update_metrics_visibility(self) -> None: + if 
diff --git a/toptek/lmstudio.py b/toptek/lmstudio.py
new file mode 100644
index 0000000..70059ea
--- /dev/null
+++ b/toptek/lmstudio.py
@@ -0,0 +1,175 @@
+"""Minimal synchronous client for LM Studio's OpenAI-compatible API."""
+
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, Iterable, Iterator, Optional
+import urllib.error
+import urllib.request
+
+
+class HTTPError(RuntimeError):
+    """Error raised when an HTTP request fails."""
+
+
+@dataclass
+class Model:
+    model_id: str
+    owned_by: Optional[str] = None
+    max_context: Optional[int] = None
+    description: Optional[str] = None
+
+    @classmethod
+    def from_payload(cls, payload: Dict[str, Any]) -> "Model":
+        context = payload.get("metadata", {}).get("context_length")
+        if context is None:
+            context = payload.get("metadata", {}).get("context_window")
+        try:
+            context_window = int(context) if context is not None else None
+        except (TypeError, ValueError):
+            context_window = None
+        return cls(
+            model_id=str(payload.get("id")),
+            owned_by=payload.get("owned_by"),
+            max_context=context_window,
+            description=payload.get("description")
+            or payload.get("metadata", {}).get("display_name"),
+        )
+
+
+class _URLLibResponse:
+    def __init__(self, response: Any) -> None:
+        self._response = response
+        self.status = getattr(response, "status", getattr(response, "code", None))
+
+    def read(self) -> bytes:
+        data = self._response.read()
+        self._response.close()
+        return data
+
+    def iter_lines(self) -> Iterator[str]:
+        iterator: Iterable[bytes] = self._response
+        try:
+            for chunk in iterator:
+                yield chunk.decode("utf-8").rstrip("\n")
+        except TypeError as exc:  # pragma: no cover - response not iterable
+            raise HTTPError("Response is not iterable") from exc
+        finally:
+            self._response.close()
+
+
+class _URLLibTransport:
+    def __init__(self, opener: Optional[urllib.request.OpenerDirector] = None) -> None:
+        self._opener = opener or urllib.request.build_opener()
+
+    def request(
+        self,
+        method: str,
+        url: str,
+        *,
+        data: Optional[bytes] = None,
+        headers: Optional[Dict[str, str]] = None,
+        timeout: float = 30.0,
+    ) -> _URLLibResponse:
+        request = urllib.request.Request(url, data=data, headers=headers or {}, method=method)
+        try:
+            response = self._opener.open(request, timeout=timeout)
+        except urllib.error.HTTPError as exc:  # pragma: no cover - network failure
+            raise HTTPError(f"HTTP {exc.code}: {exc.reason}") from exc
+        except urllib.error.URLError as exc:  # pragma: no cover - network failure
+            raise HTTPError(str(exc.reason)) from exc
+        return _URLLibResponse(response)
+
+
+class LMStudioClient:
+    """Blocking LM Studio client using stdlib ``urllib`` for portability."""
+
+    def __init__(
+        self,
+        settings: Dict[str, Any],
+        *,
+        transport: Optional[_URLLibTransport] = None,
+        timeout: float | None = None,
+    ) -> None:
+        self._base_url = settings.get("base_url", "http://localhost:1234/v1").rstrip("/")
+        self._api_key = settings.get("api_key", "")
+        self._model = settings.get("model")
+        self._timeout = timeout or float(settings.get("timeout_s", 30))
+        self._transport = transport or _URLLibTransport()
+
+    def list_models(self) -> list[Model]:
+        response = self._request("GET", "/models")
+        data = self._decode_json(response)
+        models = data.get("data") if isinstance(data, dict) else None
+        if not isinstance(models, list):
+            return []
+        return [Model.from_payload(item) for item in models if isinstance(item, dict)]
+
+    def health(self) -> bool:
+        try:
+            self._request("GET", "/models")
+        except HTTPError:
+            return False
+        return True
+
+    def chat_stream(self, payload: Dict[str, Any]) -> Iterator[str]:
+        request_payload = dict(payload)
+        if self._model and "model" not in request_payload:
+            request_payload["model"] = self._model
+        response = self._request("POST", "/chat/completions", payload=request_payload, stream=True)
+        for line in response.iter_lines():
+            if not line:
+                continue
+            yield line
+
+    # ------------------------------------------------------------------ internals
+    def _headers(self) -> Dict[str, str]:
+        headers = {"Content-Type": "application/json"}
+        if self._api_key:
+            headers["Authorization"] = f"Bearer {self._api_key}"
+        return headers
+
+    def _request(
+        self,
+        method: str,
+        path: str,
+        *,
+        payload: Optional[Dict[str, Any]] = None,
+        stream: bool = False,
+    ) -> _URLLibResponse:
+        url = f"{self._base_url}{path}"
+        data = None
+        if payload is not None:
+            data = json.dumps(payload).encode("utf-8")
+        response = self._transport.request(
+            method,
+            url,
+            data=data,
+            headers=self._headers(),
+            timeout=self._timeout,
+        )
+        status = response.status or 0
+        if status >= 400:
+            raise HTTPError(f"HTTP {status}")
+        # Streaming callers iterate the wrapper; non-streaming callers decode via _decode_json.
+        return response
+
+    def _decode_json(self, response: _URLLibResponse) -> Dict[str, Any]:
+        body = response.read()
+        if not body:
+            return {}
+        if isinstance(body, str):
+            text = body
+        else:
+            text = body.decode("utf-8")
+        try:
+            return json.loads(text)
+        except json.JSONDecodeError as exc:  # pragma: no cover - defensive
+            raise HTTPError("Invalid JSON response") from exc
+
+
+__all__ = ["HTTPError", "LMStudioClient", "Model"]
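Consumer sketch (not part of the diff): chat_stream yields raw server-sent-event lines (the "data: ..." strings seen in the tests); assembling the assistant text is left to the caller. Handling for the OpenAI-style "[DONE]" sentinel is included on the assumption that LM Studio emits it at end of stream.

    import json

    from toptek.lmstudio import LMStudioClient

    client = LMStudioClient({"base_url": "http://localhost:1234/v1", "api_key": "lm-studio"})
    payload = {
        "model": "llama-3.1-8b-instruct",
        "messages": [{"role": "user", "content": "ping"}],
        "temperature": 0.0,
    }

    chunks = []
    for line in client.chat_stream(payload):
        if not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        delta = json.loads(data)["choices"][0]["delta"]
        chunks.append(delta.get("content", ""))
    print("".join(chunks))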
diff --git a/toptek/runtime_guard.py b/toptek/runtime_guard.py
new file mode 100644
index 0000000..aada254
--- /dev/null
+++ b/toptek/runtime_guard.py
@@ -0,0 +1,24 @@
+"""Runtime guardrails for unsupported Python versions."""
+
+from __future__ import annotations
+
+import sys
+import warnings
+
+
+UNSUPPORTED_VERSION = (3, 12)
+
+
+def warn_if_unsupported() -> None:
+    """Emit a warning when running on unsupported interpreter versions."""
+
+    version = sys.version_info
+    if (version.major, version.minor) >= UNSUPPORTED_VERSION:
+        warnings.warn(
+            "Python 3.12+ is not yet validated for Toptek; unexpected behaviour may occur.",
+            RuntimeWarning,
+            stacklevel=2,
+        )
+
+
+__all__ = ["warn_if_unsupported", "UNSUPPORTED_VERSION"]
diff --git a/toptek/ui/live_tab.py b/toptek/ui/live_tab.py
new file mode 100644
index 0000000..de95697
--- /dev/null
+++ b/toptek/ui/live_tab.py
@@ -0,0 +1,7 @@
+"""Convenience re-export of the Tkinter Live tab widget."""
+
+from __future__ import annotations
+
+from toptek.gui.live_tab import LiveTab
+
+__all__ = ["LiveTab"]
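Call-site sketch (not part of the diff): nothing in this patch invokes warn_if_unsupported, so the guard only takes effect once an entry point calls it. The main() hook below is an assumption, not code from this change.

    from toptek import runtime_guard

    def main() -> None:
        runtime_guard.warn_if_unsupported()  # RuntimeWarning on Python >= 3.12
        # ...launch the GUI or CLI as usual...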