212 changes: 212 additions & 0 deletions .github/workflows/e2e_tests.yaml
@@ -0,0 +1,212 @@
# .github/workflows/e2e_tests.yaml
name: E2E Tests

on: [push, pull_request_target]

jobs:
  e2e_tests:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: 1arp/create-a-file-action@0.4.5
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'lightspeed-stack.yaml'
          content: |
            name: foo bar baz
            service:
              host: 0.0.0.0
              port: 8080
              auth_enabled: false
              workers: 1
              color_log: true
              access_log: true
            llama_stack:
              # Uses a remote llama-stack service
              # The instance would have already been started with a llama-stack-run.yaml file
              use_as_library_client: false
              # Alternative for "as library use"
              # use_as_library_client: true
              # library_client_config_path: <path-to-llama-stack-run.yaml-file>
              url: http://llama-stack:8321
              api_key: xyzzy
            user_data_collection:
              feedback_disabled: false
              feedback_storage: "/tmp/data/feedback"
              transcripts_disabled: false
              transcripts_storage: "/tmp/data/transcripts"
              data_collector:
                enabled: false
                ingress_server_url: null
                ingress_server_auth_token: null
                ingress_content_service_name: null
                collection_interval: 7200 # 2 hours in seconds
                cleanup_after_send: true
                connection_timeout_seconds: 30
            authentication:
              module: "noop"

      - uses: 1arp/create-a-file-action@0.4.5
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'run.yaml'
          content: |
            version: '2'
            image_name: simplest-llamastack-app
            apis:
              - agents
              - datasetio
              - eval
              - inference
              - post_training
              - safety
              - scoring
              - telemetry
              - tool_runtime
              - vector_io
            benchmarks: []
            container_image: null
            datasets: []
            external_providers_dir: null
            inference_store:
              db_path: /app-root/.llama/distributions/ollama/inference_store.db
              type: sqlite
            logging: null
            metadata_store:
              db_path: /app-root/.llama/distributions/ollama/registry.db
              namespace: null
              type: sqlite
            providers:
              agents:
                - config:
                    persistence_store:
                      db_path: /app-root/.llama/distributions/ollama/agents_store.db
                      namespace: null
                      type: sqlite
                    responses_store:
                      db_path: /app-root/.llama/distributions/ollama/responses_store.db
                      type: sqlite
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              datasetio:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/huggingface_datasetio.db
                      namespace: null
                      type: sqlite
                  provider_id: huggingface
                  provider_type: remote::huggingface
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/localfs_datasetio.db
                      namespace: null
                      type: sqlite
                  provider_id: localfs
                  provider_type: inline::localfs
              eval:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/meta_reference_eval.db
                      namespace: null
                      type: sqlite
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              inference:
                - provider_id: openai
                  provider_type: remote::openai
                config:
                  api_key: ${{ secrets.OPENAI_API_KEY }}
Comment on lines +122 to +126
⚠️ Potential issue

YAML structure broken under providers.inference – indentation & secret-handling need fixing

The config key is not part of the list item because it’s out-dented relative to the preceding - provider_id … entry.
YAML will parse inference as a sequence and a mapping simultaneously, which is invalid in most parsers and will break service start-up.
While you’re there, avoid embedding the secret directly in the file—pass it via env-var instead to keep it ephemeral.

-              inference:
-                - provider_id: openai
-                  provider_type: remote::openai
-                config:
-                  api_key: ${{ secrets.OPENAI_API_KEY }}
+              inference:
+                - provider_id: openai
+                  provider_type: remote::openai
+                  config:
+                    api_key: ${OPENAI_API_KEY}   # injected at container runtime
🤖 Prompt for AI Agents
In .github/workflows/e2e_tests.yaml around lines 122 to 126, the YAML under
providers.inference is invalid because the config key is not properly indented
as part of the list item, causing a mix of sequence and mapping. Fix this by
indenting config and its api_key under the provider_id entry to keep them within
the same list item. Additionally, remove the direct use of the secret in the
YAML and instead pass the OPENAI_API_KEY via an environment variable to avoid
embedding secrets directly in the file.
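
A note on the suggested ${OPENAI_API_KEY} placeholder: llama-stack's config loader conventionally uses ${env.VAR} syntax for runtime substitution (an assumption about the distribution in use here; verify against its docs). Under that assumption, the fixed block would read:

            inference:
              - provider_id: openai
                provider_type: remote::openai
                config:
                  # Resolved from the container environment at startup, so the
                  # rendered run.yaml never holds the literal secret
                  api_key: ${env.OPENAI_API_KEY}

Either way, the key is already wired through the environment block of docker-compose.yaml below, so nothing secret needs to land in the generated file.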

              post_training:
                - config:
                    checkpoint_format: huggingface
                    device: cpu
                    distributed_backend: null
                  provider_id: huggingface
                  provider_type: inline::huggingface
              safety:
                - config:
                    excluded_categories: []
                  provider_id: llama-guard
                  provider_type: inline::llama-guard
              scoring:
                - config: {}
                  provider_id: basic
                  provider_type: inline::basic
                - config: {}
                  provider_id: llm-as-judge
                  provider_type: inline::llm-as-judge
                - config:
                    openai_api_key: '******'
                  provider_id: braintrust
                  provider_type: inline::braintrust
              telemetry:
                - config:
                    service_name: ''
                    sinks: sqlite
                    sqlite_db_path: /app-root/.llama/distributions/ollama/trace_store.db
                  provider_id: meta-reference
                  provider_type: inline::meta-reference
              tool_runtime:
                - provider_id: model-context-protocol
                  provider_type: remote::model-context-protocol
                  config: {}
              vector_io:
                - config:
                    kvstore:
                      db_path: /app-root/.llama/distributions/ollama/faiss_store.db
                      namespace: null
                      type: sqlite
                  provider_id: faiss
                  provider_type: inline::faiss
            scoring_fns: []
            server:
              auth: null
              host: null
              port: 8321
              quota: null
              tls_cafile: null
              tls_certfile: null
              tls_keyfile: null
            shields: []
            vector_dbs: []

            models:
              - model_id: gpt-4o-mini
                provider_id: openai
                model_type: llm
                provider_model_id: gpt-4o-mini
      - name: list files
        run: |
          ls
          cat lightspeed-stack.yaml
          cat run.yaml
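
Since both configs are generated inline, a cheap parse check at this point would catch indentation slips (like the one flagged in the review comment above) before any container boots. A hypothetical extra step; it assumes PyYAML is available to the runner's python3 (pip install pyyaml first if not):

      - name: Validate generated YAML
        run: |
          # Fail fast if either generated file is not parseable YAML
          python3 -c "import yaml; yaml.safe_load(open('lightspeed-stack.yaml'))"
          python3 -c "import yaml; yaml.safe_load(open('run.yaml'))"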

      - name: Run service manually
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          # Debug: check that the environment variable is visible to docker compose
          echo "OPENAI_API_KEY is set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')"
          echo "OPENAI_API_KEY length: ${#OPENAI_API_KEY}"

          docker compose --version
          docker compose up -d

      - name: Wait for services
        run: |
          echo "Waiting for services to be healthy..."
          sleep 20 # adjust depending on boot time

      - name: Run tests in test container
        run: |
          echo "Setup is done, running a smoke test"
          curl http://localhost:8080/v1/models
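
The fixed sleep 20 is fragile on slow runners. Polling the same endpoint the smoke test already uses is sturdier; a sketch of a drop-in replacement for the "Wait for services" step:

      - name: Wait for services
        run: |
          # Poll the API instead of sleeping a fixed 20 seconds
          for i in $(seq 1 30); do
            if curl -fsS http://localhost:8080/v1/models > /dev/null; then
              echo "Service became ready after ${i} attempt(s)"
              exit 0
            fi
            sleep 2
          done
          echo "Service did not become ready in time" >&2
          exit 1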
32 changes: 32 additions & 0 deletions docker-compose.yaml
@@ -0,0 +1,32 @@
services:
  llama-stack:
    build:
      context: .
      dockerfile: test.containerfile
    container_name: llama-stack
    ports:
      - "8321:8321" # Expose llama-stack on 8321 (adjust if needed)
    volumes:
      - ./run.yaml:/app-root/run.yaml:Z
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    networks:
      - lightspeednet

  lightspeed-stack:
    image: quay.io/lightspeed-core/lightspeed-stack:latest
    container_name: lightspeed-stack
    ports:
      - "8080:8080"
    volumes:
      - ./lightspeed-stack.yaml:/app-root/lightspeed-stack.yaml:Z
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    depends_on:
      - llama-stack
    networks:
      - lightspeednet

networks:
  lightspeednet:
    driver: bridge
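
Note that depends_on as written only orders container start, not readiness, which is why the workflow needs its own wait step. A compose-level healthcheck can take over that job. A sketch, assuming the llama-stack container answers on /v1/models once booted (curl is present in the image, since the containerfile below already relies on it):

services:
  llama-stack:
    # ...settings as above, plus:
    healthcheck:
      test: ["CMD", "curl", "-fsS", "http://localhost:8321/v1/models"]
      interval: 5s
      timeout: 3s
      retries: 20

  lightspeed-stack:
    # ...settings as above, with the gated dependency:
    depends_on:
      llama-stack:
        condition: service_healthy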
37 changes: 37 additions & 0 deletions test.containerfile
@@ -0,0 +1,37 @@
# vim: set filetype=dockerfile
FROM registry.access.redhat.com/ubi9/ubi-minimal

ARG APP_ROOT=/app-root

ENV PATH="$PATH:/root/.local/bin"

ADD run.yaml ./

RUN microdnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs \
    python3.12 python3.12-devel python3.12-pip git tar

RUN curl -LsSf https://astral.sh/uv/install.sh | sh

RUN uv -h

RUN uv venv && \
    uv pip install llama-stack \
        fastapi \
        opentelemetry-sdk \
        opentelemetry-exporter-otlp \
        opentelemetry-instrumentation \
        aiosqlite \
        litellm \
        uvicorn \
        blobfile \
        datasets \
        sqlalchemy \
        faiss-cpu \
        mcp \
        autoevals \
        psutil \
        torch \
        peft \
        trl

CMD ["uv", "run", "llama", "stack", "run", "run.yaml"]
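
To iterate on this image locally without going through the workflow, the same pair of services can be built and exercised directly; a usage sketch that mirrors the workflow's "Run service manually" step:

export OPENAI_API_KEY=...   # your real key
docker compose up -d --build
curl http://localhost:8080/v1/models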
5 changes: 3 additions & 2 deletions tests/e2e/features/rest_api.feature
@@ -16,12 +16,13 @@ Feature: REST API tests
"""
{
"ready": "bool",
"reason": "str"
"reason": "str",
"providers": "list"
}
"""
And The body of the response is the following
"""
{"ready": true, "reason": "service is ready"}
{"ready": true, "reason": "All providers are healthy", "providers": []}
"""

