diff --git a/content/manuals/ai/compose/model-runner.md b/content/manuals/ai/compose/model-runner.md
index 29c4a87448e5..bf82c7fbfeba 100644
--- a/content/manuals/ai/compose/model-runner.md
+++ b/content/manuals/ai/compose/model-runner.md
@@ -40,7 +40,7 @@ services:
 
 models:
   smollm2:
-    image: ai/smollm2
+    model: ai/smollm2
 ```
 
 ### How it works
@@ -70,7 +70,7 @@ services:
 
 models:
   smollm2:
-    image: ai/smollm2
+    model: ai/smollm2
 ```
 
 With this configuration, your `my-chat-app` service will receive:
diff --git a/content/manuals/ai/compose/models-and-compose.md b/content/manuals/ai/compose/models-and-compose.md
index 01c895cc7a49..f657715c1eaf 100644
--- a/content/manuals/ai/compose/models-and-compose.md
+++ b/content/manuals/ai/compose/models-and-compose.md
@@ -42,7 +42,7 @@ services:
 
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
 ```
 
 This example defines:
@@ -56,7 +56,7 @@ Models support various configuration options:
 ```yaml
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
     context_size: 1024
     runtime_flags:
       - "--a-flag"
@@ -87,9 +87,9 @@ services:
 
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
   embedding-model:
-    image: ai/all-minilm
+    model: ai/all-minilm
 ```
 
 With short syntax, the platform automatically generates environment variables based on the model name:
@@ -116,9 +116,9 @@ services:
 
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
   embedding-model:
-    image: ai/all-minilm
+    model: ai/all-minilm
 ```
 
 With this configuration, your service receives:
@@ -142,7 +142,7 @@ services:
 
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
 ```
 
 Docker Model Runner will:
@@ -163,9 +163,9 @@ services:
 
 models:
   llm:
-    image: ai/smollm2
+    model: ai/smollm2
     # Cloud-specific configurations
-    labels:
+    x-cloud-options:
       - "cloud.instance-type=gpu-small"
       - "cloud.region=us-west-2"
 ```
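
For reference (not part of the patch itself), here is a minimal sketch of a complete Compose file using the corrected short syntax these docs describe. The `my-chat-app` service name comes from the patched docs; its image tag is an illustrative assumption.

```yaml
# Minimal sketch of the corrected short syntax (assumed compose.yaml).
services:
  my-chat-app:
    image: my-chat-app:latest   # illustrative image tag, not from the patch
    models:
      - smollm2                 # short syntax: reference the model by name

# The top-level `models` element uses `model:` (the OCI artifact reference),
# not `image:`, to identify the model to run.
models:
  smollm2:
    model: ai/smollm2
```

With this binding, per the short-syntax behavior the patched docs describe, the platform generates environment variables for the service from the model name (for example, a URL and model identifier derived from `smollm2`).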