2 changes: 1 addition & 1 deletion .github/workflows/e2e_tests.yaml
@@ -65,7 +65,7 @@ jobs:
           isAbsolutePath: false
           file: 'lightspeed-stack.yaml'
           content: |
-            name: foo bar baz
+            name: Lightspeed Core Service (LCS)
             service:
               host: 0.0.0.0
               port: 8080
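For reference, the workflow step above writes a minimal lightspeed-stack.yaml before the e2e suite runs, and the name it sets is exactly the string the updated info scenario asserts on. A small sketch (plain Python with PyYAML; not part of the PR) that parses the snippet and checks that coupling:

import yaml  # PyYAML

# The config content written by the workflow step above.
CONFIG_SNIPPET = """\
name: Lightspeed Core Service (LCS)
service:
  host: 0.0.0.0
  port: 8080
"""

config = yaml.safe_load(CONFIG_SNIPPET)

# The e2e scenario "Check if info endpoint is working" asserts this exact name.
assert config["name"] == "Lightspeed Core Service (LCS)"
assert config["service"]["port"] == 8080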
101 changes: 55 additions & 46 deletions tests/e2e/features/info.feature
@@ -1,46 +1,55 @@
-# Feature: Info endpoint API tests
-#TODO: fix test
-
-# Background:
-# Given The service is started locally
-# And REST API service hostname is localhost
-# And REST API service port is 8080
-# And REST API service prefix is /v1
-
-# Scenario: Check if the OpenAPI endpoint works as expected
-# Given The system is in default state
-# When I access endpoint "openapi.json" using HTTP GET method
-# Then The status code of the response is 200
-# And The body of the response contains OpenAPI
-
-# Scenario: Check if info endpoint is working
-# Given The system is in default state
-# When I access REST API endpoint "info" using HTTP GET method
-# Then The status code of the response is 200
-# And The body of the response has proper name "lightspeed_stack" and version "0.2.0"
-
-# Scenario: Check if models endpoint is working
-# Given The system is in default state
-# When I access REST API endpoint "models" using HTTP GET method
-# Then The status code of the response is 200
-# And The body of the response contains gpt
-
-
-# Scenario: Check if models endpoint is working
-# Given The system is in default state
-# And The llama-stack connection is disrupted
-# When I access REST API endpoint "models" using HTTP GET method
-# Then The status code of the response is 503
-
-# Scenario: Check if metrics endpoint is working
-# Given The system is in default state
-# When I access REST API endpoint "metrics" using HTTP GET method
-# Then The status code of the response is 200
-# And The body of the response has proper metrics
-
-# Scenario: Check if metrics endpoint is working
-# Given The system is in default state
-# And The llama-stack connection is disrupted
-# When I access REST API endpoint "metrics" using HTTP GET method
-# Then The status code of the response is 500
-
+Feature: Info tests
+
+
+  Background:
+    Given The service is started locally
+    And REST API service hostname is localhost
+    And REST API service port is 8080
+    And REST API service prefix is /v1
+
+  Scenario: Check if the OpenAPI endpoint works as expected
+    Given The system is in default state
+    When I access endpoint "openapi.json" using HTTP GET method
+    Then The status code of the response is 200
+    And The body of the response contains OpenAPI
+
+  Scenario: Check if info endpoint is working
+    Given The system is in default state
+    When I access REST API endpoint "info" using HTTP GET method
+    Then The status code of the response is 200
+    And The body of the response has proper name Lightspeed Core Service (LCS) and version 0.2.0
+    And The body of the response has llama-stack version 0.2.19
+
+  Scenario: Check if info endpoint reports error when llama-stack connection is not working
+    Given The system is in default state
+    And The llama-stack connection is disrupted
+    When I access REST API endpoint "info" using HTTP GET method
+    Then The status code of the response is 500
+    And The body of the response is the following
+      """
+      {"detail": {"response": "Unable to connect to Llama Stack", "cause": "Connection error."}}
+      """
+
+  Scenario: Check if models endpoint is working
+    Given The system is in default state
+    When I access REST API endpoint "models" using HTTP GET method
+    Then The status code of the response is 200
+    And The body of the response for model gpt-4o-mini has proper structure
+
+
+  Scenario: Check if models endpoint reports error when llama-stack connection is not working
+    Given The system is in default state
+    And The llama-stack connection is disrupted
+    When I access REST API endpoint "models" using HTTP GET method
+    Then The status code of the response is 500
+    And The body of the response is the following
+      """
+      {"detail": {"response": "Unable to connect to Llama Stack", "cause": "Connection error."}}
+      """
+
+
+  Scenario: Check if metrics endpoint is working
+    Given The system is in default state
+    When I access endpoint "metrics" using HTTP GET method
+    Then The status code of the response is 200
+    And The body of the response contains ls_provider_model_configuration
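The scenarios above rely on @given/@when steps defined elsewhere in the test suite; they are not part of this diff. A hypothetical sketch of how the GET step might populate context.response, which the @then implementations in tests/e2e/features/steps/info.py below inspect. The hostname, port, and api_prefix context attributes are assumptions, not necessarily the suite's actual names:

import requests
from behave import when
from behave.runner import Context


@when('I access REST API endpoint "{endpoint}" using HTTP GET method')
def access_rest_api_endpoint(context: Context, endpoint: str) -> None:
    """Send a GET request to the service and keep the response for @then steps."""
    # Assumed context attributes; the real suite may store these differently.
    base = f"http://{context.hostname}:{context.port}{context.api_prefix}"
    context.response = requests.get(f"{base}/{endpoint}", timeout=60)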
67 changes: 51 additions & 16 deletions tests/e2e/features/steps/info.py
@@ -4,19 +4,54 @@
 from behave.runner import Context
 
 
-@then(
-    "The body of the response has proper name {system_prompt:w} and version {version:w}"
-)
-def check_name_version(context: Context, system_prompt: str, version: str) -> None:
-    """Check proper name and version number."""
-    context.system_prompt = system_prompt
-    context.version = version
-    # TODO: add step implementation
-    assert context is not None
-
-
-@then("The body of the response has proper metrics")
-def check_metrics(context: Context) -> None:
-    """Check proper metrics."""
-    # TODO: add step implementation
-    assert context is not None
@then("The body of the response has proper name {service_name} and version {version}")
def check_name_version(context: Context, service_name: str, version: str) -> None:
"""Check proper service name and version number."""
response_json = context.response.json()
assert response_json is not None, "Response is not valid JSON"

assert response_json["name"] == service_name, f"name is {response_json["name"]}"
assert (
response_json["service_version"] == version
), f"version is {response_json["service_version"]}"


@then("The body of the response has llama-stack version {llama_version}")
def check_llama_version(context: Context, llama_version: str) -> None:
"""Check proper llama-stack version number."""
response_json = context.response.json()
assert response_json is not None, "Response is not valid JSON"

assert (
response_json["llama_stack_version"] == llama_version
), f"llama-stack version is {response_json["llama_stack_version"]}"


@then("The body of the response for model {model} has proper structure")
def check_model_structure(context: Context, model: str) -> None:
"""Check that the gpt-4o-mini model has the correct structure and required fields."""
response_json = context.response.json()
assert response_json is not None, "Response is not valid JSON"

assert "models" in response_json, "Response missing 'models' field"
models = response_json["models"]
assert len(models) > 0, "Models list should not be empty"

gpt_model = None
for model_id in models:
if "gpt-4o-mini" in model_id.get("identifier", ""):
gpt_model = model_id
break

assert gpt_model is not None

assert gpt_model["type"] == "model", "type should be 'model'"
assert gpt_model["api_model_type"] == "llm", "api_model_type should be 'llm'"
assert gpt_model["model_type"] == "llm", "model_type should be 'llm'"
assert gpt_model["provider_id"] == "openai", "provider_id should be 'openai'"
assert (
gpt_model["provider_resource_id"] == model
), "provider_resource_id should be 'gpt-4o-mini'"
assert (
gpt_model["identifier"] == f"openai/{model}"
), "identifier should be 'openai/gpt-4o-mini'"