From 4a42a2b172e4286439c3e86d9a9e79f6286cc42c Mon Sep 17 00:00:00 2001
From: Justin Dorfman
Date: Thu, 26 Dec 2024 10:13:58 -0800
Subject: [PATCH 1/2] Update models example json

---
 docs/cody/clients/install-vscode.mdx | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/docs/cody/clients/install-vscode.mdx b/docs/cody/clients/install-vscode.mdx
index 5172bac4f..8e4a3558c 100644
--- a/docs/cody/clients/install-vscode.mdx
+++ b/docs/cody/clients/install-vscode.mdx
@@ -368,28 +368,28 @@ Example VS Code user settings JSON configuration:
 ```json
 {
   "cody.dev.models": [
-    // Google (e.g. Gemini 1.5 Pro)
     {
       "provider": "google",
-      "model": "gemini-1.5-pro-latest",
-      "tokens": 1000000,
-      "apiKey": "xyz"
+      "model": "gemini-2.0-flash-exp",
+      "inputTokens": 1048576,
+      "outputTokens": 8192,
+      "apiKey": "",
+      "options": {
+        "temperature": 0.0
+      }
     },
-    // Groq (e.g. llama2 70b)
     {
       "provider": "groq",
-      "model": "llama2-70b-4096",
+      "model": "llama2-70b-4096",
       "tokens": 4096,
-      "apiKey": "xyz"
+      "apiKey": ""
     },
-    // OpenAI & OpenAI-compatible APIs
     {
-      "provider": "openai", // keep groq as provider
+      "provider": "openai",
       "model": "some-model-id",
-      "apiKey": "xyz",
+      "apiKey": "",
       "apiEndpoint": "https://host.domain/path"
     },
-    // Ollama (remote)
     {
       "provider": "ollama",
       "model": "some-model-id",

From 98ed659630e4b4c56bdac5c08979eb609c6016da Mon Sep 17 00:00:00 2001
From: Justin Dorfman
Date: Thu, 26 Dec 2024 10:45:55 -0800
Subject: [PATCH 2/2] Update JSON with Pri's version

---
 docs/cody/clients/install-vscode.mdx | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/docs/cody/clients/install-vscode.mdx b/docs/cody/clients/install-vscode.mdx
index 8e4a3558c..f9aa58ccf 100644
--- a/docs/cody/clients/install-vscode.mdx
+++ b/docs/cody/clients/install-vscode.mdx
@@ -366,8 +366,8 @@ Once configured, and VS Code has been restarted, you can select the configured m
 Example VS Code user settings JSON configuration:
 
 ```json
-{ 
-  "cody.dev.models": [ 
+{
+  "cody.dev.models": [
     {
       "provider": "google",
       "model": "gemini-2.0-flash-exp",
@@ -380,14 +380,23 @@
     },
     {
       "provider": "groq",
-      "model": "llama2-70b-4096", 
-      "tokens": 4096,
-      "apiKey": ""
+      "model": "llama2-70b-4096",
+      "inputTokens": 4000,
+      "outputTokens": 4000,
+      "apiKey": "",
+      "options": {
+        "temperature": 0.0
+      }
     },
     {
       "provider": "openai",
       "model": "some-model-id",
+      "inputTokens": 32000,
+      "outputTokens": 4000,
       "apiKey": "",
+      "options": {
+        "temperature": 0.0
+      },
       "apiEndpoint": "https://host.domain/path"
     },
     {
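
For reference, this is roughly how the updated `cody.dev.models` example should read once both patches are applied. It is assembled from the hunks above; the Ollama entry and anything after it is cut off in the second hunk, so it is omitted here:

```json
{
  "cody.dev.models": [
    {
      "provider": "google",
      "model": "gemini-2.0-flash-exp",
      "inputTokens": 1048576,
      "outputTokens": 8192,
      "apiKey": "",
      "options": {
        "temperature": 0.0
      }
    },
    {
      "provider": "groq",
      "model": "llama2-70b-4096",
      "inputTokens": 4000,
      "outputTokens": 4000,
      "apiKey": "",
      "options": {
        "temperature": 0.0
      }
    },
    {
      "provider": "openai",
      "model": "some-model-id",
      "inputTokens": 32000,
      "outputTokens": 4000,
      "apiKey": "",
      "options": {
        "temperature": 0.0
      },
      "apiEndpoint": "https://host.domain/path"
    }
  ]
}
```

Net effect of the second patch: the Groq and OpenAI entries get the same shape as the Google one, with explicit `inputTokens`/`outputTokens` limits and a per-model `options.temperature`, replacing Groq's single `tokens` field.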