diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml
new file mode 100644
index 00000000..00db84f1
--- /dev/null
+++ b/.github/workflows/e2e_tests.yaml
@@ -0,0 +1,212 @@
+# .github/workflows/e2e_tests.yaml
+name: E2E Tests
+
+on: [push, pull_request_target]
+
+jobs:
+  e2e_tests:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: 1arp/create-a-file-action@0.4.5
+        env:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+        with:
+          path: '.'
+          isAbsolutePath: false
+          file: 'lightspeed-stack.yaml'
+          content: |
+            name: foo bar baz
+            service:
+              host: 0.0.0.0
+              port: 8080
+              auth_enabled: false
+              workers: 1
+              color_log: true
+              access_log: true
+            llama_stack:
+              # Uses a remote llama-stack service.
+              # The instance will already have been started with a llama-stack run.yaml file.
+              use_as_library_client: false
+              # Alternative: use llama-stack as a library
+              # use_as_library_client: true
+              # library_client_config_path:
+              url: http://llama-stack:8321
+              api_key: xyzzy
+            user_data_collection:
+              feedback_disabled: false
+              feedback_storage: "/tmp/data/feedback"
+              transcripts_disabled: false
+              transcripts_storage: "/tmp/data/transcripts"
+              data_collector:
+                enabled: false
+                ingress_server_url: null
+                ingress_server_auth_token: null
+                ingress_content_service_name: null
+                collection_interval: 7200  # 2 hours in seconds
+                cleanup_after_send: true
+                connection_timeout_seconds: 30
+            authentication:
+              module: "noop"
+
+      - uses: 1arp/create-a-file-action@0.4.5
+        env:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+        with:
+          path: '.'
+          isAbsolutePath: false
+          file: 'run.yaml'
+          content: |
+            version: '2'
+            image_name: simplest-llamastack-app
+            apis:
+            - agents
+            - datasetio
+            - eval
+            - inference
+            - post_training
+            - safety
+            - scoring
+            - telemetry
+            - tool_runtime
+            - vector_io
+            benchmarks: []
+            container_image: null
+            datasets: []
+            external_providers_dir: null
+            inference_store:
+              db_path: /app-root/.llama/distributions/ollama/inference_store.db
+              type: sqlite
+            logging: null
+            metadata_store:
+              db_path: /app-root/.llama/distributions/ollama/registry.db
+              namespace: null
+              type: sqlite
+            providers:
+              agents:
+              - config:
+                  persistence_store:
+                    db_path: /app-root/.llama/distributions/ollama/agents_store.db
+                    namespace: null
+                    type: sqlite
+                  responses_store:
+                    db_path: /app-root/.llama/distributions/ollama/responses_store.db
+                    type: sqlite
+                provider_id: meta-reference
+                provider_type: inline::meta-reference
+              datasetio:
+              - config:
+                  kvstore:
+                    db_path: /app-root/.llama/distributions/ollama/huggingface_datasetio.db
+                    namespace: null
+                    type: sqlite
+                provider_id: huggingface
+                provider_type: remote::huggingface
+              - config:
+                  kvstore:
+                    db_path: /app-root/.llama/distributions/ollama/localfs_datasetio.db
+                    namespace: null
+                    type: sqlite
+                provider_id: localfs
+                provider_type: inline::localfs
+              eval:
+              - config:
+                  kvstore:
+                    db_path: /app-root/.llama/distributions/ollama/meta_reference_eval.db
+                    namespace: null
+                    type: sqlite
+                provider_id: meta-reference
+                provider_type: inline::meta-reference
+              inference:
+              - provider_id: openai
+                provider_type: remote::openai
+                config:
+                  api_key: ${{ secrets.OPENAI_API_KEY }}
+              post_training:
+              - config:
+                  checkpoint_format: huggingface
+                  device: cpu
+                  distributed_backend: null
+                provider_id: huggingface
+                provider_type: inline::huggingface
+              safety:
+              - config:
+                  excluded_categories: []
+                provider_id: llama-guard
+                provider_type: inline::llama-guard
+              scoring:
+              - config: {}
+                provider_id: basic
+                provider_type: inline::basic
+              - config: {}
+                provider_id: llm-as-judge
+                provider_type: inline::llm-as-judge
+              - config:
+                  openai_api_key: '******'
+                provider_id: braintrust
+                provider_type: inline::braintrust
+              telemetry:
+              - config:
+                  service_name: ''
+                  sinks: sqlite
+                  sqlite_db_path: /app-root/.llama/distributions/ollama/trace_store.db
+                provider_id: meta-reference
+                provider_type: inline::meta-reference
+              tool_runtime:
+              - provider_id: model-context-protocol
+                provider_type: remote::model-context-protocol
+                config: {}
+              vector_io:
+              - config:
+                  kvstore:
+                    db_path: /app-root/.llama/distributions/ollama/faiss_store.db
+                    namespace: null
+                    type: sqlite
+                provider_id: faiss
+                provider_type: inline::faiss
+            scoring_fns: []
+            server:
+              auth: null
+              host: null
+              port: 8321
+              quota: null
+              tls_cafile: null
+              tls_certfile: null
+              tls_keyfile: null
+            shields: []
+            vector_dbs: []
+
+            models:
+            - model_id: gpt-4o-mini
+              provider_id: openai
+              model_type: llm
+              provider_model_id: gpt-4o-mini
+
+      - name: List files
+        run: |
+          ls
+          cat lightspeed-stack.yaml
+          cat run.yaml
+
+      - name: Run service manually
+        env:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+        run: |
+          # Debug: check that the environment variable is available for docker compose
+          echo "OPENAI_API_KEY is set: $([ -n "$OPENAI_API_KEY" ] && echo 'YES' || echo 'NO')"
+          echo "OPENAI_API_KEY length: ${#OPENAI_API_KEY}"
+
+          docker compose --version
+          docker compose up -d
+
+      - name: Wait for services
+        run: |
+          echo "Waiting for services to be healthy..."
+          sleep 20  # adjust depending on boot time
+
+      - name: Run tests in test container
+        run: |
+          echo "Setup is done, running API smoke test"
+          curl http://localhost:8080/v1/models
\ No newline at end of file
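The fixed `sleep 20` in the "Wait for services" step is a timing guess and can flake on slow runners. A minimal polling sketch follows; it is not part of this diff, and only reuses the port 8080 and `/v1/models` path that the workflow's final step already curls (the 60-second budget is an assumed value):

# Hypothetical drop-in for the "Wait for services" step: poll the
# gateway instead of sleeping a fixed 20 seconds (30 attempts x 2s).
for i in $(seq 1 30); do
  if curl -sf http://localhost:8080/v1/models > /dev/null; then
    echo "lightspeed-stack answered after ~$((i * 2))s"
    exit 0
  fi
  sleep 2
done
echo "lightspeed-stack did not become ready in time" >&2
exit 1
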
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 00000000..ee0c2cd7
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,32 @@
+services:
+  llama-stack:
+    build:
+      context: .
+      dockerfile: test.containerfile
+    container_name: llama-stack
+    ports:
+      - "8321:8321"  # Expose llama-stack on 8321 (adjust if needed)
+    volumes:
+      - ./run.yaml:/app-root/run.yaml:Z
+    environment:
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+    networks:
+      - lightspeednet
+
+  lightspeed-stack:
+    image: quay.io/lightspeed-core/lightspeed-stack:latest
+    container_name: lightspeed-stack
+    ports:
+      - "8080:8080"
+    volumes:
+      - ./lightspeed-stack.yaml:/app-root/lightspeed-stack.yaml:Z
+    environment:
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+    depends_on:
+      - llama-stack
+    networks:
+      - lightspeednet
+
+networks:
+  lightspeednet:
+    driver: bridge
\ No newline at end of file
diff --git a/test.containerfile b/test.containerfile
new file mode 100644
index 00000000..6dfc5d2c
--- /dev/null
+++ b/test.containerfile
@@ -0,0 +1,39 @@
+# vim: set filetype=dockerfile
+FROM registry.access.redhat.com/ubi9/ubi-minimal
+
+ARG APP_ROOT=/app-root
+
+# uv installs itself into /root/.local/bin
+ENV PATH="$PATH:/root/.local/bin"
+
+ADD run.yaml ./
+
+RUN microdnf install -y --nodocs --setopt=keepcache=0 --setopt=tsflags=nodocs \
+    python3.12 python3.12-devel python3.12-pip git tar
+
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Sanity check that uv is on PATH
+RUN uv -h
+
+RUN uv venv && \
+    uv pip install llama-stack \
+    fastapi \
+    opentelemetry-sdk \
+    opentelemetry-exporter-otlp \
+    opentelemetry-instrumentation \
+    aiosqlite \
+    litellm \
+    uvicorn \
+    blobfile \
+    datasets \
+    sqlalchemy \
+    faiss-cpu \
+    mcp \
+    autoevals \
+    psutil \
+    torch \
+    peft \
+    trl
+
+CMD ["uv", "run", "llama", "stack", "run", "run.yaml"]
\ No newline at end of file
diff --git a/tests/e2e/features/rest_api.feature b/tests/e2e/features/rest_api.feature
index 4c41ec4b..7735ee3f 100644
--- a/tests/e2e/features/rest_api.feature
+++ b/tests/e2e/features/rest_api.feature
@@ -16,12 +16,13 @@ Feature: REST API tests
     """
     {
         "ready": "bool",
-        "reason": "str"
+        "reason": "str",
+        "providers": "list"
     }
     """
     And The body of the response is the following
     """
-    {"ready": true, "reason": "service is ready"}
+    {"ready": true, "reason": "All providers are healthy", "providers": []}
     """
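To reproduce the CI flow locally, a sketch under the same assumptions (Docker Compose v2 available, an OpenAI key exported in the shell, and lightspeed-stack.yaml / run.yaml already present in the repo root as the workflow generates them). The `/v1/readiness` path is inferred from the feature file's expected body and is not confirmed by this diff:

# Hypothetical placeholder key; substitute your own.
export OPENAI_API_KEY="<your-key>"
docker compose up -d --build

# Smoke test from the workflow:
curl http://localhost:8080/v1/models

# Readiness shape asserted by the updated feature file (endpoint path assumed):
curl http://localhost:8080/v1/readiness
# => {"ready": true, "reason": "All providers are healthy", "providers": []}

docker compose down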