diff --git a/README.md b/README.md
index 9df65fa0..f35e89a1 100644
--- a/README.md
+++ b/README.md
@@ -244,7 +244,7 @@ version = "0.1.0"
 description = "Llama Stack runner"
 authors = []
 dependencies = [
-    "llama-stack==0.2.14",
+    "llama-stack==0.2.18",
     "fastapi>=0.115.12",
     "opentelemetry-sdk>=1.34.0",
     "opentelemetry-exporter-otlp>=1.34.0",
diff --git a/docs/deployment_guide.md b/docs/deployment_guide.md
index b228a262..bd337409 100644
--- a/docs/deployment_guide.md
+++ b/docs/deployment_guide.md
@@ -390,7 +390,7 @@ cp examples/run.yaml /tmp/llama-stack-server
 The output should be in this form:
 ```json
 {
-  "version": "0.2.14"
+  "version": "0.2.18"
 }
 ```
 
@@ -676,7 +676,7 @@ a4982f43195537b9eb1cec510fe6655f245d6d4b7236a4759808115d5d719972
 description = "Default template for PDM package"
 authors = []
 dependencies = [
-    "llama-stack==0.2.14",
+    "llama-stack==0.2.18",
     "fastapi>=0.115.12",
     "opentelemetry-sdk>=1.34.0",
     "opentelemetry-exporter-otlp>=1.34.0",
@@ -1071,7 +1071,7 @@ models:
 The output should be in this form:
 ```json
 {
-  "version": "0.2.14"
+  "version": "0.2.18"
 }
 ```
 
diff --git a/docs/getting_started.md b/docs/getting_started.md
index 3ccb7157..da2102d8 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -24,7 +24,7 @@ It is possible to run Lightspeed Core Stack service with Llama Stack "embedded"
 1. Add and install all required dependencies
    ```bash
    uv add \
-       "llama-stack==0.2.17" \
+       "llama-stack==0.2.18" \
        "fastapi>=0.115.12" \
        "opentelemetry-sdk>=1.34.0" \
        "opentelemetry-exporter-otlp>=1.34.0" \
diff --git a/examples/pyproject.llamastack.toml b/examples/pyproject.llamastack.toml
index 5dc3f441..327ab2d6 100644
--- a/examples/pyproject.llamastack.toml
+++ b/examples/pyproject.llamastack.toml
@@ -4,7 +4,7 @@ version = "0.1.0"
 description = "Default template for PDM package"
 authors = []
 dependencies = [
-    "llama-stack==0.2.17",
+    "llama-stack==0.2.18",
     "fastapi>=0.115.12",
     "opentelemetry-sdk>=1.34.0",
     "opentelemetry-exporter-otlp>=1.34.0",
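
Once the diff is applied and dependencies are reinstalled, the bump can be sanity-checked by comparing the pinned package version with what the running service reports. A minimal sketch in bash, assuming a Llama Stack server listening on localhost:8321 (the default port) that serves the version endpoint producing the JSON shown in the docs above; adjust host and port for your deployment:

```bash
# Check the locally installed pin (expect: Version: 0.2.18).
uv pip show llama-stack | grep Version

# Query the running server's reported version
# (expect output of the form: {"version": "0.2.18"}).
curl -s http://localhost:8321/v1/version
```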