Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 28 additions & 2 deletions src/app/endpoints/conversations.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,23 @@ async def get_conversation_endpoint_handler(
conversation_id: str,
auth: Any = Depends(auth_dependency),
) -> ConversationResponse:
"""Handle request to retrieve a conversation by ID."""
"""
Handle request to retrieve a conversation by ID.

Retrieve a conversation's chat history by its ID. It fetches
the conversation session from the Llama Stack backend,
simplifies the session data to essential chat history, and
returns it in a structured response. Raises HTTP 400 for
invalid IDs, 404 if not found, 503 if the backend is
unavailable, and 500 for unexpected errors.

Parameters:
conversation_id (str): Unique identifier of the conversation to retrieve.

Returns:
ConversationResponse: Structured response containing the conversation
ID and simplified chat history.
"""
check_configuration_loaded(configuration)

# Validate conversation ID format
Expand Down Expand Up @@ -286,7 +302,17 @@ async def delete_conversation_endpoint_handler(
conversation_id: str,
auth: Any = Depends(auth_dependency),
) -> ConversationDeleteResponse:
"""Handle request to delete a conversation by ID."""
"""
Handle request to delete a conversation by ID.

Validates the conversation ID format and attempts to delete the
corresponding session from the Llama Stack backend. Raises HTTP
errors for invalid IDs, not found conversations, connection
issues, or unexpected failures.

Returns:
ConversationDeleteResponse: Response indicating the result of the deletion operation.
"""
check_configuration_loaded(configuration)

# Validate conversation ID format
Expand Down
42 changes: 32 additions & 10 deletions src/app/endpoints/feedback.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
router = APIRouter(prefix="/feedback", tags=["feedback"])
auth_dependency = get_auth_dependency()

# Response for the feedback endpoint
feedback_response: dict[int | str, dict[str, Any]] = {
200: {
"description": "Feedback received and stored",
Expand All @@ -45,22 +46,29 @@


def is_feedback_enabled() -> bool:
"""Check if feedback is enabled.
"""
Check if feedback is enabled.

Return whether user feedback collection is currently enabled
based on configuration.

Returns:
bool: True if feedback is enabled, False otherwise.
bool: True if feedback collection is enabled; otherwise, False.
"""
return configuration.user_data_collection_configuration.feedback_enabled


async def assert_feedback_enabled(_request: Request) -> None:
"""Check if feedback is enabled.
"""
Ensure that feedback collection is enabled.

Raises an HTTP 403 error if it is not.

Args:
_request (Request): The FastAPI request object (unused).

Raises:
HTTPException: If feedback is disabled.
HTTPException: If feedback collection is disabled.
"""
feedback_enabled = is_feedback_enabled()
if not feedback_enabled:
Expand All @@ -78,6 +86,9 @@ def feedback_endpoint_handler(
) -> FeedbackResponse:
"""Handle feedback requests.

Processes a user feedback submission, storing the feedback and
returning a confirmation response.

Args:
feedback_request: The request containing feedback information.
ensure_feedback_enabled: The feedback handler (FastAPI Depends) that
Expand All @@ -87,6 +98,9 @@ def feedback_endpoint_handler(

Returns:
Response indicating the status of the feedback storage request.

Raises:
HTTPException: With HTTP 500 status if feedback storage fails.
"""
logger.debug("Feedback received %s", str(feedback_request))

Expand All @@ -107,11 +121,15 @@ def feedback_endpoint_handler(


def store_feedback(user_id: str, feedback: dict) -> None:
"""Store feedback in the local filesystem.
"""
Store feedback in the local filesystem.

Args:
user_id: The user ID (UUID).
feedback: The feedback to store.
Persist user feedback to a uniquely named JSON file in the
configured local storage directory.

Parameters:
user_id (str): Unique identifier of the user submitting feedback.
feedback (dict): Feedback data to be stored, merged with user ID and timestamp.
"""
logger.debug("Storing feedback for user %s", user_id)
# Creates storage path only if it doesn't exist. The `exist_ok=True` prevents
Expand All @@ -135,10 +153,14 @@ def store_feedback(user_id: str, feedback: dict) -> None:

@router.get("/status")
def feedback_status() -> StatusResponse:
"""Handle feedback status requests.
"""
Handle feedback status requests.

Return the current enabled status of the feedback
functionality.

Returns:
Response indicating the status of the feedback.
StatusResponse: Indicates whether feedback collection is enabled.
"""
logger.debug("Feedback status requested")
feedback_status_enabled = is_feedback_enabled()
Expand Down
16 changes: 14 additions & 2 deletions src/app/endpoints/models.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""Handler for REST API call to provide info."""
"""Handler for REST API call to list available models."""

import logging
from typing import Any
Expand Down Expand Up @@ -44,7 +44,19 @@

@router.get("/models", responses=models_responses)
async def models_endpoint_handler(_request: Request) -> ModelsResponse:
"""Handle requests to the /models endpoint."""
"""
Handle requests to the /models endpoint.

Process GET requests to the /models endpoint, returning a list of available
models from the Llama Stack service.

Raises:
HTTPException: If unable to connect to the Llama Stack server or if
model retrieval fails for any reason.

Returns:
ModelsResponse: An object containing the list of available models.
"""
check_configuration_loaded(configuration)

llama_stack_configuration = configuration.llama_stack_configuration
Expand Down