diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 7aa362e494e0..8fc98e5d506c 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -16247,6 +16247,36 @@ "output_cost_per_token": 0.0, "supports_function_calling": true }, + "ollama/deepseek-v3.1:671b-cloud": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/gpt-oss:120b-cloud": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/gpt-oss:20b-cloud": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, "ollama/internlm2_5-20b-chat": { "input_cost_per_token": 0.0, "litellm_provider": "ollama", @@ -16408,6 +16438,16 @@ "mode": "completion", "output_cost_per_token": 0.0 }, + "ollama/qwen3-coder:480b-cloud": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, "ollama/vicuna": { "input_cost_per_token": 0.0, "litellm_provider": "ollama",