28 changes: 28 additions & 0 deletions .github/workflows/pylint.yaml
@@ -0,0 +1,28 @@
name: Python linter

on:
  - push
  - pull_request

jobs:
  pylint:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: read
    name: "Pylinter"
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Python version
        run: python --version
      - name: PDM installation
        run: pip install --user pdm
      - name: Install dependencies
        run: pdm install
      - name: Install devel dependencies
        run: pdm install --group default,dev
      - name: Python linter
        run: pdm run pylint src
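
Note for contributors: the lint step above can be reproduced locally with `pdm run pylint src`, or through pylint's Python API. A minimal sketch of the latter, assuming pylint >= 2.12 (where `linter.stats` became a `LinterStats` object):

from pylint.lint import Run

# exit=False keeps pylint from calling sys.exit(), so the score of the
# run can be inspected programmatically afterwards.
results = Run(["src"], exit=False)
print(f"pylint score: {results.linter.stats.global_note:.2f}/10")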
2 changes: 1 addition & 1 deletion Makefile
@@ -3,7 +3,7 @@ PATH_TO_PLANTUML := ~/bin


run: ## Run the service locally
-	python src/lightspeed-stack.py
+	python src/lightspeed_stack.py

test-unit: ## Run the unit tests
@echo "Running unit tests..."
561 changes: 373 additions & 188 deletions pdm.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -24,6 +24,7 @@ dev = [
"pytest-cov>=5.0.0",
"pytest-mock>=3.14.0",
"pyright>=1.1.401",
"pylint>=3.3.7",
]

[tool.pytest.ini_options]
2 changes: 1 addition & 1 deletion src/app/endpoints/config.py
@@ -21,6 +21,6 @@


@router.get("/config", responses=get_config_responses)
-def config_endpoint_handler(request: Request) -> Configuration:
+def config_endpoint_handler(_request: Request) -> Configuration:
    """Handle requests to the /config endpoint."""
    return configuration.configuration
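
The handler renames in this and the following endpoint files all fix the same pylint message, unused-argument (W0613): the signatures keep the `request` parameter, but the handler bodies never read it. A leading underscore documents that the argument is intentionally unused and silences the warning without changing the call signature. A minimal sketch of the convention (names are illustrative):

def handler(request: object) -> dict:
    """pylint flags the unread parameter (unused-argument, W0613)."""
    return {"status": "ok"}

def quiet_handler(_request: object) -> dict:
    """Same behavior; the underscore prefix marks the argument as unused."""
    return {"status": "ok"}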
2 changes: 1 addition & 1 deletion src/app/endpoints/info.py
@@ -21,6 +21,6 @@


@router.get("/info", responses=get_into_responses)
-def info_endpoint_handler(request: Request) -> InfoResponse:
+def info_endpoint_handler(_request: Request) -> InfoResponse:
    """Handle request to the /info endpoint."""
    return InfoResponse(name="foo", version=__version__)
2 changes: 1 addition & 1 deletion src/app/endpoints/models.py
@@ -40,7 +40,7 @@


@router.get("/models", responses=models_responses)
-def models_endpoint_handler(request: Request) -> ModelsResponse:
+def models_endpoint_handler(_request: Request) -> ModelsResponse:
    """Handle requests to the /models endpoint."""
    llama_stack_config = configuration.llama_stack_configuration
    logger.info("LLama stack config: %s", llama_stack_config)
16 changes: 8 additions & 8 deletions src/app/endpoints/query.py
@@ -30,7 +30,7 @@

@router.post("/query", responses=query_response)
def query_endpoint_handler(
-    request: Request, query_request: QueryRequest
+    _request: Request, query_request: QueryRequest
) -> QueryResponse:
    """Handle request to the /query endpoint."""
    llama_stack_config = configuration.llama_stack_configuration
@@ -58,9 +58,9 @@ def select_model_id(client: LlamaStackClient, query_request: QueryRequest) -> st
            for m in models
            if m.model_type == "llm"  # pyright: ignore[reportAttributeAccessIssue]
        ).identifier
-        logger.info(f"Selected model: {model}")
+        logger.info("Selected model: %s", model)
        return model
-    except (StopIteration, AttributeError):
+    except (StopIteration, AttributeError) as e:
        message = "No LLM model found in available models"
        logger.error(message)
        raise HTTPException(
@@ -69,9 +69,9 @@ def select_model_id(client: LlamaStackClient, query_request: QueryRequest) -> st
"response": constants.UNABLE_TO_PROCESS_RESPONSE,
"cause": message,
},
)
) from e

logger.info(f"Searching for model: {model_id}, provider: {provider_id}")
logger.info("Searching for model: %s, provider: %s", model_id, provider_id)
if not any(
m.identifier == model_id and m.provider_id == provider_id for m in models
):
@@ -96,15 +96,15 @@ def retrieve_response(
    if not available_shields:
        logger.info("No available shields. Disabling safety")
    else:
-        logger.info(f"Available shields found: {available_shields}")
+        logger.info("Available shields found: %s", available_shields)

    # use system prompt from request or default one
    system_prompt = (
        query_request.system_prompt
        if query_request.system_prompt
        else constants.DEFAULT_SYSTEM_PROMPT
    )
-    logger.debug(f"Using system prompt: {system_prompt}")
+    logger.debug("Using system prompt: %s", system_prompt)

    # TODO(lucasagomes): redact attachments content before sending to LLM
    # if attachments are provided, validate them
@@ -119,7 +119,7 @@ def retrieve_response(
        tools=[],
    )
    session_id = agent.create_session("chat_session")
-    logger.debug(f"Session ID: {session_id}")
+    logger.debug("Session ID: %s", session_id)
    response = agent.create_turn(
        messages=[UserMessage(role="user", content=query_request.query)],
        session_id=session_id,
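
Two pylint fixes recur throughout query.py: log calls switch from f-strings to lazy %-style arguments (logging-fstring-interpolation, W1203), and the re-raised HTTPException now chains the original exception (raise-missing-from, W0707). A self-contained sketch of both patterns, with invented names:

import logging

logger = logging.getLogger(__name__)

def first_model(models: list[str]) -> str:
    """Invented helper illustrating the two lint fixes above."""
    try:
        model = models[0]
        # Lazy %-formatting: the message is only built if this record
        # is actually emitted (fixes W1203).
        logger.info("Selected model: %s", model)
        return model
    except IndexError as e:
        logger.error("No model found")
        # "from e" preserves the original traceback as __cause__
        # (fixes W0707).
        raise ValueError("no models available") from e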
6 changes: 3 additions & 3 deletions src/app/endpoints/root.py
@@ -10,7 +10,7 @@
router = APIRouter(tags=["root"])


index_page = """
INDEX_PAGE = """
<html>
<head>
<title>Lightspeed core service</title>
@@ -26,7 +26,7 @@


@router.get("/", response_class=HTMLResponse)
-def root_endpoint_handler(request: Request) -> HTMLResponse:
+def root_endpoint_handler(_request: Request) -> HTMLResponse:
    """Handle request to the / endpoint."""
    logger.info("Serving index page")
-    return HTMLResponse(index_page)
+    return HTMLResponse(INDEX_PAGE)
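
The index_page -> INDEX_PAGE rename satisfies pylint's naming check (invalid-name, C0103): a module-level value assigned once and never mutated is treated as a constant and expected to be UPPER_CASE, while local variables stay lower_case. A short sketch with made-up names:

GREETING_PAGE = "<html><body>hello</body></html>"  # module constant: UPPER_CASE

def render() -> str:
    """Local variables keep snake_case naming."""
    page = GREETING_PAGE  # local: lower_case
    return page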
12 changes: 6 additions & 6 deletions src/client.py
@@ -23,9 +23,9 @@ def get_llama_stack_client(
            return client
        msg = "Configuration problem: library_client_config_path option is not set"
        logger.error(msg)
-        raise Exception(msg)
-    else:
-        logger.info("Using Llama stack running as a service")
-        return LlamaStackClient(
-            base_url=llama_stack_config.url, api_key=llama_stack_config.api_key
-        )
+        # tisnik: use custom exception there - with cause etc.
+        raise Exception(msg)  # pylint: disable=broad-exception-raised
+    logger.info("Using Llama stack running as a service")
+    return LlamaStackClient(
+        base_url=llama_stack_config.url, api_key=llama_stack_config.api_key
+    )
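
The restructuring above is pylint's no-else-return/no-else-raise refactor (R1705/R1720): when the `if` branch always ends in a `return` or `raise`, the `else:` only adds a level of indentation without changing control flow. A minimal sketch with an invented function:

def resolve(path: str | None) -> str:
    """Invented example of the R1720 refactor."""
    if path is None:
        raise ValueError("path is not set")
    # No "else" needed: this line is reachable only when path is set.
    return path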
4 changes: 2 additions & 2 deletions src/configuration.py
@@ -1,9 +1,9 @@
"""Configuration loader."""

import yaml

import logging
from typing import Any, Optional

import yaml
from models.config import Configuration, LLamaStackConfiguration

logger = logging.getLogger(__name__)
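
Moving `import yaml` below the standard-library imports fixes pylint's wrong-import-order (C0411), which expects three groups separated by blank lines: standard library, then third-party, then first-party. Sketched with this file's own imports (the last line only resolves inside this repository):

import logging                    # 1. standard library
from typing import Any, Optional  # 1. standard library

import yaml                       # 2. third-party

from models.config import Configuration  # 3. first-party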
7 changes: 4 additions & 3 deletions src/lightspeed-stack.py → src/lightspeed_stack.py
@@ -3,11 +3,12 @@
from argparse import ArgumentParser
import logging

+from rich.logging import RichHandler
+
from runners.uvicorn import start_uvicorn
from models.config import Configuration
from configuration import configuration

-from rich.logging import RichHandler

FORMAT = "%(message)s"
logging.basicConfig(
@@ -39,10 +40,10 @@ def create_argument_parser() -> ArgumentParser:
    return parser


-def dump_configuration(configuration: Configuration) -> None:
+def dump_configuration(service_configuration: Configuration) -> None:
    """Dump actual configuration into JSON file."""
    with open("configuration.json", "w", encoding="utf-8") as fout:
-        fout.write(configuration.model_dump_json(indent=4))
+        fout.write(service_configuration.model_dump_json(indent=4))


def main() -> None:
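
Renaming the parameter of dump_configuration fixes redefined-outer-name (W0621): the module imports a global called `configuration`, and a parameter of the same name shadowed it inside the function. A self-contained sketch of the same situation, with invented names:

configuration = {"service": "lightspeed"}  # module-level name

def dump(service_configuration: dict) -> None:
    """Renamed parameter: no shadowing of the module-level name."""
    print(service_configuration)

dump(configuration)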
6 changes: 4 additions & 2 deletions src/models/config.py
@@ -1,8 +1,9 @@
"""Model with service configuration."""

from typing import Optional

from pydantic import BaseModel, model_validator

from typing import Optional
from typing_extensions import Self


@@ -52,8 +53,9 @@ def check_llama_stack_model(self) -> Self:
            self.use_as_library_client = False
        if self.use_as_library_client:
            if self.library_client_config_path is None:
+                # pylint: disable=line-too-long
                raise ValueError(
-                    "LLama stack library client mode is enabled but a configuration file path is not specified"
+                    "LLama stack library client mode is enabled but a configuration file path is not specified"  # noqa: C0301
                )
        return self

Expand Down
6 changes: 4 additions & 2 deletions src/models/requests.py
@@ -1,8 +1,10 @@
"""Model for service requests."""

from typing import Optional, Self

from pydantic import BaseModel, model_validator

from llama_stack_client.types.agents.turn_create_params import Document
from typing import Optional, Self


class Attachment(BaseModel):
@@ -117,7 +119,7 @@ def get_documents(self) -> list[Document]:
            return []
        return [
            Document(content=att.content, mime_type=att.content_type)
-            for att in self.attachments
+            for att in self.attachments  # pylint: disable=not-an-iterable
        ]

@model_validator(mode="after")
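
The not-an-iterable suppression handles a pylint false positive: the checker cannot always infer that an Optional pydantic field has already been narrowed to a list by the time the comprehension runs, and an inline disable keeps the suppression scoped to that single line rather than the whole file. A self-contained sketch, assuming pydantic is installed (model and field names are invented):

from typing import Optional

from pydantic import BaseModel

class Basket(BaseModel):
    """Invented model mirroring the Optional attachments field."""

    items: Optional[list[str]] = None

    def upper_items(self) -> list[str]:
        """Return upper-cased items once the field is known to be set."""
        if self.items is None:
            return []
        return [
            item.upper()
            for item in self.items  # pylint: disable=not-an-iterable
        ]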
3 changes: 2 additions & 1 deletion src/models/responses.py
@@ -1,8 +1,9 @@
"""Models for service responses."""

from pydantic import BaseModel
from typing import Any, Optional

from pydantic import BaseModel


class ModelsResponse(BaseModel):
"""Model representing a response to models request."""