229 changes: 229 additions & 0 deletions .github/workflows/e2e_tests.yaml
@@ -0,0 +1,229 @@
# .github/workflows/e2e_tests.yaml
name: E2E Tests

on: [push, pull_request_target]

jobs:
  e2e_tests:
    runs-on: ubuntu-latest
    env:
      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - uses: 1arp/create-a-file-action@0.4.5
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'lightspeed-stack.yaml'
          content: |
            name: foo bar baz
            service:
              host: 0.0.0.0
              port: 8080
              auth_enabled: false
              workers: 1
              color_log: true
              access_log: true
            llama_stack:
              # Uses a remote llama-stack service.
              # The llama-stack instance is expected to be running already,
              # started from a llama-stack run.yaml file.
              use_as_library_client: false
              # Alternative: run llama-stack in-process as a library client
              # use_as_library_client: true
              # library_client_config_path: <path-to-llama-stack-run.yaml-file>
              url: http://llama-stack:8321
              api_key: xyzzy
            user_data_collection:
              feedback_disabled: false
              feedback_storage: "/tmp/data/feedback"
              transcripts_disabled: false
              transcripts_storage: "/tmp/data/transcripts"
              data_collector:
                enabled: false
                ingress_server_url: null
                ingress_server_auth_token: null
                ingress_content_service_name: null
                collection_interval: 7200  # 2 hours in seconds
                cleanup_after_send: true
                connection_timeout_seconds: 30
            authentication:
              module: "noop"
      - uses: 1arp/create-a-file-action@0.4.5
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          path: '.'
          isAbsolutePath: false
          file: 'run.yaml'
          content: |
            version: '2'
            image_name: simplest-llamastack-app
            apis:
            - agents
            - datasetio
            - eval
            - inference
            - post_training
            - safety
            - scoring
            - telemetry
            - tool_runtime
            - vector_io
            benchmarks: []
            container_image: null
            datasets: []
            external_providers_dir: null
            inference_store:
              db_path: /app-root/.llama/distributions/ollama/inference_store.db
              type: sqlite
            logging: null
            metadata_store:
              db_path: /app-root/.llama/distributions/ollama/registry.db
              namespace: null
              type: sqlite
            providers:
              agents:
              - config:
                  persistence_store:
                    db_path: /app-root/.llama/distributions/ollama/agents_store.db
                    namespace: null
                    type: sqlite
                  responses_store:
                    db_path: /app-root/.llama/distributions/ollama/responses_store.db
                    type: sqlite
                provider_id: meta-reference
                provider_type: inline::meta-reference
              datasetio:
              - config:
                  kvstore:
                    db_path: /app-root/.llama/distributions/ollama/huggingface_datasetio.db
                    namespace: null
                    type: sqlite
                provider_id: huggingface
                provider_type: remote::huggingface
              - config:
                  kvstore:
                    db_path: /app-root/.llama/distributions/ollama/localfs_datasetio.db
                    namespace: null
                    type: sqlite
                provider_id: localfs
                provider_type: inline::localfs
              eval:
              - config:
                  kvstore:
                    db_path: /app-root/.llama/distributions/ollama/meta_reference_eval.db
                    namespace: null
                    type: sqlite
                provider_id: meta-reference
                provider_type: inline::meta-reference
              inference:
              - provider_id: openai
                provider_type: remote::openai
                config:
                  api_key: ${{ env.OPENAI_API_KEY }}
              post_training:
              - config:
                  checkpoint_format: huggingface
                  device: cpu
                  distributed_backend: null
                provider_id: huggingface
                provider_type: inline::huggingface
              safety:
              - config:
                  excluded_categories: []
                provider_id: llama-guard
                provider_type: inline::llama-guard
              scoring:
              - config: {}
                provider_id: basic
                provider_type: inline::basic
              - config: {}
                provider_id: llm-as-judge
                provider_type: inline::llm-as-judge
              - config:
                  openai_api_key: '******'
                provider_id: braintrust
                provider_type: inline::braintrust
              telemetry:
              - config:
                  service_name: 'lightspeed-stack'
                  sinks: sqlite
                  sqlite_db_path: /app-root/.llama/distributions/ollama/trace_store.db
                provider_id: meta-reference
                provider_type: inline::meta-reference
              tool_runtime:
              - provider_id: model-context-protocol
                provider_type: remote::model-context-protocol
                config: {}
              vector_io:
              - config:
                  kvstore:
                    db_path: /app-root/.llama/distributions/ollama/faiss_store.db
                    namespace: null
                    type: sqlite
                provider_id: faiss
                provider_type: inline::faiss
            scoring_fns: []
            server:
              auth: null
              host: null
              port: 8321
              quota: null
              tls_cafile: null
              tls_certfile: null
              tls_keyfile: null
            shields: []
            vector_dbs: []

            models:
            - model_id: gpt-4o-mini
              provider_id: openai
              model_type: llm
              provider_model_id: gpt-4o-mini

      - name: List generated config files
        run: |
          ls
          cat lightspeed-stack.yaml
          cat run.yaml

      - name: Start services with docker compose
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          # Debug: check that OPENAI_API_KEY is visible to docker compose
          echo "OPENAI_API_KEY is set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')"
          echo "OPENAI_API_KEY length: ${#OPENAI_API_KEY}"

          docker compose --version
          docker compose up -d

      - name: Wait for services
        run: |
          echo "Waiting for services to be healthy..."
          sleep 20  # adjust depending on boot time
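          # A sturdier alternative, sketched here as a suggestion: poll the
          # same /v1/models endpoint the next step probes instead of sleeping
          # for a fixed interval, e.g.:
          #   for i in $(seq 1 30); do
          #     curl -sf http://localhost:8080/v1/models >/dev/null && break
          #     sleep 2
          #   done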

      - name: Quick connectivity test
        run: |
          echo "Testing basic connectivity before full test suite..."
          curl -f http://localhost:8080/v1/models || {
            echo "❌ Basic connectivity failed - showing logs before running full tests"
            docker compose logs --tail=30
            exit 1
          }

      - name: Run e2e tests
        run: |
          echo "Installing test dependencies..."
          pip install uv
          uv sync

          echo "Running comprehensive e2e test suite..."
          make test-e2e
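For local debugging, the same flow can be reproduced outside CI. A minimal
sketch, assuming lightspeed-stack.yaml and run.yaml already exist in the repo
root (the workflow writes them) and that a real key is exported:

    export OPENAI_API_KEY=...                 # value elided
    docker compose up -d
    curl -f http://localhost:8080/v1/models   # same probe the workflow uses
    pip install uv && uv sync
    make test-e2e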
32 changes: 32 additions & 0 deletions docker-compose.yaml
@@ -0,0 +1,32 @@
services:
  llama-stack:
    build:
      context: .
      dockerfile: test.containerfile
    container_name: llama-stack
    ports:
      - "8321:8321"  # Expose llama-stack on 8321 (adjust if needed)
    volumes:
      - ./run.yaml:/app-root/run.yaml:Z
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    networks:
      - lightspeednet

  lightspeed-stack:
    image: quay.io/lightspeed-core/lightspeed-stack:latest
    container_name: lightspeed-stack
    ports:
      - "8080:8080"
    volumes:
      - ./lightspeed-stack.yaml:/app-root/lightspeed-stack.yaml:Z
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
    depends_on:
      - llama-stack
    networks:
      - lightspeednet

networks:
  lightspeednet:
    driver: bridge
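A possible follow-up, sketched here rather than included in the change: give
llama-stack a healthcheck and gate lightspeed-stack on it, which would make the
workflow's fixed sleep largely redundant. The probe URL is an assumption about
llama-stack's API on 8321; curl is present in the image since
test.containerfile uses it at build time:

    services:
      llama-stack:
        healthcheck:
          # Assumed readiness probe; swap in whichever endpoint llama-stack exposes
          test: ["CMD", "curl", "-sf", "http://localhost:8321/v1/models"]
          interval: 10s
          timeout: 5s
          retries: 12

      lightspeed-stack:
        depends_on:
          llama-stack:
            condition: service_healthy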
39 changes: 39 additions & 0 deletions test.containerfile
@@ -0,0 +1,39 @@
# vim: set filetype=dockerfile :
FROM registry.access.redhat.com/ubi9/ubi-minimal

ARG APP_ROOT=/app-root

ENV PATH="$PATH:/root/.local/bin"

WORKDIR ${APP_ROOT}
COPY run.yaml ./


# Install Python 3.12 and the build tools needed by the Python dependencies
RUN microdnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs \
    python3.12 python3.12-devel python3.12-pip git tar

# Install uv and verify it is available on PATH
RUN curl -LsSf https://astral.sh/uv/install.sh | sh

RUN uv -h

# Create a virtualenv and install llama-stack plus the packages
# its providers in run.yaml need at runtime
RUN uv venv && \
    uv pip install llama-stack \
        fastapi \
        opentelemetry-sdk \
        opentelemetry-exporter-otlp \
        opentelemetry-instrumentation \
        aiosqlite \
        litellm \
        uvicorn \
        blobfile \
        datasets \
        sqlalchemy \
        faiss-cpu \
        mcp \
        autoevals \
        psutil \
        torch \
        peft \
        trl

# Start the llama-stack server using the copied (or mounted) run.yaml
CMD ["uv", "run", "llama", "stack", "run", "run.yaml"]
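For completeness, a sketch of building and running the llama-stack image on
its own (the image tag is arbitrary; the port and volume settings mirror
docker-compose.yaml above):

    docker build -f test.containerfile -t llama-stack-e2e .
    docker run --rm -p 8321:8321 \
      -e OPENAI_API_KEY="$OPENAI_API_KEY" \
      -v "$(pwd)/run.yaml:/app-root/run.yaml:Z" \
      llama-stack-e2e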