From 94b94be130118f37266c2d9d2d3fd97a7898d637 Mon Sep 17 00:00:00 2001
From: Radovan Fuchs
Date: Thu, 2 Oct 2025 13:56:52 +0200
Subject: [PATCH] Add E2E tests for query endpoint

---
 .github/workflows/e2e_tests.yaml              |  13 +-
 tests/e2e/features/query.feature              | 176 ++++++++++++------
 .../e2e/features/steps/llm_query_response.py  |  26 ++-
 tests/e2e/features/streaming_query.feature    |   6 +-
 4 files changed, 156 insertions(+), 65 deletions(-)

diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml
index dca3cbeb..22ef6ac4 100644
--- a/.github/workflows/e2e_tests.yaml
+++ b/.github/workflows/e2e_tests.yaml
@@ -131,7 +131,7 @@ jobs:
           storage_dir: /tmp/llama-stack-files
           metadata_store:
             type: sqlite
-            db_path: .llama/distributions/ollama/files_metadata.db
+            db_path: /app-root/.llama/distributions/ollama/files_metadata.db
         provider_id: localfs
         provider_type: inline::localfs
       agents:
@@ -289,3 +289,14 @@ jobs:
           echo "Running comprehensive e2e test suite..."

           make test-e2e
+
+      - name: Show logs on failure
+        if: failure()
+        run: |
+          echo "=== Test failure logs ==="
+          echo "=== llama-stack logs ==="
+          docker compose logs llama-stack
+
+          echo ""
+          echo "=== lightspeed-stack logs ==="
+          docker compose logs lightspeed-stack
diff --git a/tests/e2e/features/query.feature b/tests/e2e/features/query.feature
index 581840d9..fb89a04e 100644
--- a/tests/e2e/features/query.feature
+++ b/tests/e2e/features/query.feature
@@ -1,60 +1,116 @@
-# Feature: Query endpoint API tests
-#TODO: fix test
-
-# Background:
-#     Given The service is started locally
-#     And REST API service hostname is localhost
-#     And REST API service port is 8080
-#     And REST API service prefix is /v1
-
-
-# Scenario: Check if LLM responds to sent question
-#     Given The system is in default state
-#     When I use "query" to ask question "Say hello"
-#     Then The status code of the response is 200
-#     And The response should have proper LLM response format
-#     And The response should contain following fragments
-#           | Fragments in LLM response |
-#           | Hello |
-
-# Scenario: Check if LLM responds to sent question with different system prompt
-#     Given The system is in default state
-#     And I change the system prompt to "new system prompt"
-#     When I use "query" to ask question "Say hello"
-#     Then The status code of the response is 200
-#     And The response should have proper LLM response format
-#     And The response should contain following fragments
-#           | Fragments in LLM response |
-#           | Hello |
-
-# Scenario: Check if LLM responds with error for malformed request
-#     Given The system is in default state
-#     And I modify the request body by removing the "query"
-#     When I use "query" to ask question "Say hello"
-#     Then The status code of the response is 422
-#     And The body of the response is the following
-#     """
-#     { "type": "missing", "loc": [ "body", "system_query" ], "msg": "Field required", }
-#     """
-
-# Scenario: Check if LLM responds to sent question with error when not authenticated
-#     Given The system is in default state
-#     And I remove the auth header
-#     When I use "query" to ask question "Say hello"
-#     Then The status code of the response is 200
-#     Then The status code of the response is 400
-#     And The body of the response is the following
-#     """
-#     {"detail": "Unauthorized: No auth header found"}
-#     """
-
-# Scenario: Check if LLM responds to sent question with error when not authorized
-#     Given The system is in default state
-#     And I modify the auth header so that the user is it authorized
-#     When I use "query" to ask question "Say hello"
-#     Then The status code of the response is 403
-#     And The body of the response is the following
-#     """
-#     {"detail": "Forbidden: User is not authorized to access this resource"}
-#     """
-
\ No newline at end of file
+@Authorized
+Feature: Query endpoint API tests
+
+  Background:
+    Given The service is started locally
+    And REST API service hostname is localhost
+    And REST API service port is 8080
+    And REST API service prefix is /v1
+
+  Scenario: Check if LLM responds properly to sent question with restrictive system prompt
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything but openshift questions"}
+      """
+    Then The status code of the response is 200
+    And The response should contain following fragments
+      | Fragments in LLM response |
+      | ask |
+
+  Scenario: Check if LLM responds properly to sent question with non-restrictive system prompt
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "you are linguistic assistant"}
+      """
+    Then The status code of the response is 200
+    And The response should contain following fragments
+      | Fragments in LLM response |
+      | checkout |
+
+  Scenario: Check if LLM ignores new system prompt in same conversation
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Generate sample yaml file for simple GitHub Actions workflow.", "system_prompt": "refuse to answer anything but openshift questions"}
+      """
+    Then The status code of the response is 200
+    And I store conversation details
+    And I use "query" to ask question with same conversation_id
+      """
+      {"query": "Write a simple code for reversing string", "system_prompt": "provide coding assistance", "model": "gpt-4-turbo", "provider": "openai"}
+      """
+    Then The status code of the response is 200
+    And The response should contain following fragments
+      | Fragments in LLM response |
+      | ask |
+
+  Scenario: Check if LLM responds to sent question with error when not authenticated
+    Given The system is in default state
+    When I use "query" to ask question
+      """
+      {"query": "Write a simple code for reversing string"}
+      """
+    Then The status code of the response is 400
+    And The body of the response is the following
+      """
+      {"detail": "No Authorization header found"}
+      """
+
+  Scenario: Check if LLM responds to sent question with error when attempting to access conversation
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"conversation_id": "123e4567-e89b-12d3-a456-426614174000", "query": "Write a simple code for reversing string"}
+      """
+    Then The status code of the response is 403
+    And The body of the response contains User is not authorized to access this resource
+
+  Scenario: Check if LLM responds for query request with error for missing query
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"provider": "openai"}
+      """
+    Then The status code of the response is 422
+    And The body of the response is the following
+      """
+      { "detail": [{"type": "missing", "loc": [ "body", "query" ], "msg": "Field required", "input": {"provider": "openai"}}] }
+      """
+
+  Scenario: Check if LLM responds for query request with error for missing model
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Say hello", "provider": "openai"}
+      """
+    Then The status code of the response is 422
+    And The body of the response contains Value error, Model must be specified if provider is specified
+
+  Scenario: Check if LLM responds for query request with error for missing provider
+    Given The system is in default state
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Say hello", "model": "gpt-4-turbo"}
+      """
+    Then The status code of the response is 422
+    And The body of the response contains Value error, Provider must be specified if model is specified
+
+  Scenario: Check if LLM responds for query request with error when llama-stack connection is disrupted
+    Given The system is in default state
+    And The llama-stack connection is disrupted
+    And I set the Authorization header to Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva
+    When I use "query" to ask question with authorization header
+      """
+      {"query": "Say hello"}
+      """
+    Then The status code of the response is 500
+    And The body of the response contains Unable to connect to Llama Stack
\ No newline at end of file
diff --git a/tests/e2e/features/steps/llm_query_response.py b/tests/e2e/features/steps/llm_query_response.py
index e8fc14c4..f1e67e0d 100644
--- a/tests/e2e/features/steps/llm_query_response.py
+++ b/tests/e2e/features/steps/llm_query_response.py
@@ -30,6 +30,27 @@ def ask_question(context: Context, endpoint: str) -> None:
     context.response = requests.post(url, json=data, timeout=DEFAULT_LLM_TIMEOUT)


+@step('I use "{endpoint}" to ask question with authorization header')
+def ask_question_authorized(context: Context, endpoint: str) -> None:
+    """Call the service REST API endpoint with question."""
+    base = f"http://{context.hostname}:{context.port}"
+    path = f"{context.api_prefix}/{endpoint}".replace("//", "/")
+    url = base + path
+
+    # Use context.text if available, otherwise use empty query
+    data = json.loads(context.text or "{}")
+    print(data)
+    context.response = requests.post(
+        url, json=data, headers=context.auth_headers, timeout=DEFAULT_LLM_TIMEOUT
+    )
+
+
+@step("I store conversation details")
+def store_conversation_details(context: Context) -> None:
+    """Store details about the conversation."""
+    context.response_data = json.loads(context.response.text)
+
+
 @step('I use "{endpoint}" to ask question with same conversation_id')
 def ask_question_in_same_conversation(context: Context, endpoint: str) -> None:
     """Call the service REST API endpoint with question, but use the existing conversation id."""
@@ -39,10 +60,13 @@ def ask_question_in_same_conversation(context: Context, endpoint: str) -> None:


     # Use context.text if available, otherwise use empty query
     data = json.loads(context.text or "{}")
+    headers = context.auth_headers if hasattr(context, "auth_headers") else {}
     data["conversation_id"] = context.response_data["conversation_id"]
     print(data)
-    context.response = requests.post(url, json=data, timeout=DEFAULT_LLM_TIMEOUT)
+    context.response = requests.post(
+        url, json=data, headers=headers, timeout=DEFAULT_LLM_TIMEOUT
+    )


 @then("The response should have proper LLM response format")
diff --git a/tests/e2e/features/streaming_query.feature b/tests/e2e/features/streaming_query.feature
index 6e75f17b..ea5a3b57 100644
--- a/tests/e2e/features/streaming_query.feature
+++ b/tests/e2e/features/streaming_query.feature
@@ -62,7 +62,7 @@ Feature: streaming_query endpoint API tests

   Scenario: Check if LLM responds for streaming_query request with error for missing query
     Given The system is in default state
-    And I use "streaming_query" to ask question
+    When I use "streaming_query" to ask question
     """
     {"provider": "openai"}
     """
@@ -74,7 +74,7 @@ Feature: streaming_query endpoint API tests

   Scenario: Check if LLM responds for streaming_query request with error for missing model
     Given The system is in default state
-    And I use "streaming_query" to ask question
+    When I use "streaming_query" to ask question
     """
     {"query": "Say hello", "provider": "openai"}
     """
@@ -83,7 +83,7 @@ Feature: streaming_query endpoint API tests

   Scenario: Check if LLM responds for streaming_query request with error for missing provider
     Given The system is in default state
-    And I use "streaming_query" to ask question
+    When I use "streaming_query" to ask question
     """
     {"query": "Say hello", "model": "gpt-4-turbo"}
     """
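
Note: the new query.feature scenarios rely on a step worded "I set the Authorization header to Bearer <token>", which is not touched by this patch, so a matching definition presumably already exists among the e2e step modules. As a rough sketch only (not the repository's actual implementation), such a step would just populate the context.auth_headers dict that ask_question_authorized() forwards to requests.post(); the function name and module shown below are illustrative assumptions.

    # Hypothetical step definition -- not included in this patch.
    from behave import step
    from behave.runner import Context


    @step("I set the Authorization header to Bearer {token}")
    def set_authorization_header(context: Context, token: str) -> None:
        """Store the Authorization header used by later REST API calls."""
        # ask_question_authorized() passes this dict straight to requests.post().
        context.auth_headers = {"Authorization": f"Bearer {token}"}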