diff --git a/README.md b/README.md
index af6b814..524c121 100644
--- a/README.md
+++ b/README.md
@@ -47,7 +47,7 @@ Just remember that the models do make mistakes at times. They might misunderstan
3. Open Excel, choose a provider from the drop-down menu in the Cellm tab, and plug in your API key.
-You can also use local models, e.g., via [Ollama](https://ollama.com/). Download and install [Ollama](https://ollama.com/), open Windows Terminal (open start menu, type `Windows Terminal`, and click `OK`), type `ollama pull gemma3:4b`, and wait for the download to finish. Open Excel, choose the Ollama provider from the drop-down menu in the Cellm tab, and you are good to go.
+You can also use local models, e.g., via [Ollama](https://ollama.com/). Download and install [Ollama](https://ollama.com/) and open Excel. Choose the Ollama provider from the drop-down menu in the Cellm tab and select a model. Cellm will prompt you to download it automatically. Alternatively, open Windows Terminal (open start menu, type `Windows Terminal`, and click `OK`), type `ollama pull gemma4:e4b`, and wait for the download to finish.
## Pricing
- **Free tier:** Use local models or your own API keys
@@ -56,7 +56,7 @@ You can also use local models, e.g., via [Ollama](https://ollama.com/). Download
## Basic usage
-Select a cell and type `=PROMPT("What model are you and who made you?")`. For Gemma 3 4B, it will tell you that it's called "Gemma" and made by Google DeepMind.
+Select a cell and type `=PROMPT("What model are you and who made you?")`. For Gemma 4 E4B, it will tell you that it's called "Gemma 4" and made by Google DeepMind.
You can also use cell references. For example, copy a news article into cell A1 and type in cell B1: `=PROMPT("Extract all person names mentioned in the text", A1)`. You can reference many cells using standard Excel notation, e.g. `=PROMPT("Extract all person names in the cells", A1:F10)` or reference multiple separate ranges, e.g. `=PROMPT("Compare these datasets", A1:B10, D1:E10)`
diff --git a/docs/api-reference/functions/prompt-model.mdx b/docs/api-reference/functions/prompt-model.mdx
index a22cd84..4b1d51c 100644
--- a/docs/api-reference/functions/prompt-model.mdx
+++ b/docs/api-reference/functions/prompt-model.mdx
@@ -7,7 +7,7 @@ Allows you to call a model from a cell formula and specify the model as the firs
## Arguments
- A string on the form "provider/model" (e.g., "openai/gpt-4o-mini").
+ A string of the form "provider/model" (e.g., "openai/gpt-5.4-mini").
The default model is determined by your configuration settings.
@@ -44,23 +44,23 @@ Allows you to call a model from a cell formula and specify the model as the firs
```excel Text Instructions
-=PROMPTMODEL("openai/gpt-4o-mini", "Extract keywords")
+=PROMPTMODEL("openai/gpt-5.4-mini", "Extract keywords")
```
```excel Cell Instructions
-=PROMPTMODEL("openai/gpt-4o-mini", A1:D10)
+=PROMPTMODEL("openai/gpt-5.4-mini", A1:D10)
```
```excel With Context
-=PROMPTMODEL("openai/gpt-4o-mini", "Extract keywords", A1:D10)
+=PROMPTMODEL("openai/gpt-5.4-mini", "Extract keywords", A1:D10)
```
```excel Multiple Cell Ranges
-=PROMPTMODEL("openai/gpt-4o-mini", "Compare these datasets", A1:B10, D1:E10)
+=PROMPTMODEL("openai/gpt-5.4-mini", "Compare these datasets", A1:B10, D1:E10)
```
```excel Mixed Cell References
-=PROMPTMODEL("openai/gpt-4o-mini", "Analyze all data", A1, B2:C5, D6)
+=PROMPTMODEL("openai/gpt-5.4-mini", "Analyze all data", A1, B2:C5, D6)
```
@@ -68,15 +68,15 @@ Allows you to call a model from a cell formula and specify the model as the firs
```excel TOROW
-=PROMPTMODEL.TOROW("openai/gpt-4o-mini", "Extract keywords", A1:D10)
+=PROMPTMODEL.TOROW("openai/gpt-5.4-mini", "Extract keywords", A1:D10)
```
```excel TOCOLUMN
-=PROMPTMODEL.TOCOLUMN("openai/gpt-4o-mini", "Extract keywords", A1:D10)
+=PROMPTMODEL.TOCOLUMN("openai/gpt-5.4-mini", "Extract keywords", A1:D10)
```
```excel TORANGE
-=PROMPTMODEL.TORANGE("openai/gpt-4o-mini", "Extract keywords", A1:D10)
+=PROMPTMODEL.TORANGE("openai/gpt-5.4-mini", "Extract keywords", A1:D10)
```
diff --git a/docs/get-started/install.mdx b/docs/get-started/install.mdx
index b902a20..b7b48e8 100644
--- a/docs/get-started/install.mdx
+++ b/docs/get-started/install.mdx
@@ -40,11 +40,11 @@ To install Cellm:
If you want to use the more powerful hosted models right away, you can skip this step. The [Hosted Models](/models/hosted-models) section shows you how.
- To get started with local models, we recommend you try out the Gemma 3 4B model with quantized aware training. Gemma 3 4B is a wonderful little model that will run fine on your CPU, ensuring no data ever leaves your computer. And it's free.
+ To get started with local models, we recommend you try out the Gemma 4 E4B model. Gemma 4 E4B is a wonderful little model that will run fine on your CPU, ensuring no data ever leaves your computer. And it's free.
1. Download and install [Ollama](https://ollama.com/). Ollama will start after the install and automatically run whenever you start up your computer.
- 2. Open the Windows Terminal, type `ollama pull gemma3:4b-it-qat` and hit Enter.
- 3. Open Excel and type `=PROMPT("What model are you and who made you?")`. The model will respond that is is Gemma 3 and made by Google.
+ 2. When you select an Ollama model in Cellm, it will prompt you to download it automatically. Alternatively, open the Windows Terminal, type `ollama pull gemma4:e4b` and hit Enter.
+ 3. Open Excel and type `=PROMPT("What model are you and who made you?")`. The model will respond that it is Gemma 4 and made by Google.
diff --git a/docs/get-started/quickstart.mdx b/docs/get-started/quickstart.mdx
index 65a0db0..8ef1f14 100644
--- a/docs/get-started/quickstart.mdx
+++ b/docs/get-started/quickstart.mdx
@@ -17,8 +17,8 @@ To get started, you can quickly install Cellm and a local model:
Download and install [Ollama](https://ollama.com/) to run local AI models.
-
- Open the Windows Terminal and type `ollama pull gemma3:4b-it-qat` to download the Gemma 3 4B model.
+
+ When you select an Ollama model in Cellm, it will prompt you to download it automatically. Alternatively, open the Windows Terminal and type `ollama pull gemma4:e4b` to download the Gemma 4 E4B model.
@@ -143,7 +143,7 @@ Beyond basic text processing, you can use "Function Calling" to give models acce
````
- Gemma 3 4B does not support function calling. For function calling you must use another model, e.g. OpenAI's `gpt-5-mini`.
+ Gemma 4 E4B does not support function calling. For function calling you must use another model, e.g. OpenAI's `gpt-5.4-mini`.
## Next steps
diff --git a/docs/models/choosing-model.mdx b/docs/models/choosing-model.mdx
index 9c7a9e8..6758fff 100644
--- a/docs/models/choosing-model.mdx
+++ b/docs/models/choosing-model.mdx
@@ -91,12 +91,12 @@ Imagine you want to analyze customer feedback from column A. Instead of a single
3. Extract Suggestions: In column D, use a Large model to analyze the feedback and suggest improvements. You could also add relevant background information on your product directly to the prompt or to a cell that you reference.
````mdx Analyze feedback
- =PROMPTMODEL("openai/gpt-4o-mini", "Analyze user feedback and suggest improvements.", B2)
+ =PROMPTMODEL("openai/gpt-5.4-mini", "Analyze user feedback and suggest improvements.", B2)
````
4. Extract Topics: In column E, extract relevant topics with a small model, which is efficient for simple extraction tasks.
````mdx Extract topics
- =PROMPTMODEL.TOROW("openai/gpt-4o-mini", "Extract relevant software engineering topics, such as UX, Bug, Documentation, or Improvement.", B2)
+ =PROMPTMODEL.TOROW("openai/gpt-5.4-mini", "Extract relevant software engineering topics, such as UX, Bug, Documentation, or Improvement.", B2)
````
This approach gives you reliable results and granular control of the output format.
diff --git a/docs/models/hosted-models.mdx b/docs/models/hosted-models.mdx
index cbd4c5a..2dbe5ca 100644
--- a/docs/models/hosted-models.mdx
+++ b/docs/models/hosted-models.mdx
@@ -20,7 +20,7 @@ We split hosted models into three tiers based on their size and capabilities, ba
| Speed | | | |
| Intelligence | | | |
| World Knowledge | | | |
-| Recommended model | Gemini 2.5 Flash Lite | Gemini 2.5 Flash | Claude Sonnet 4.5 |
+| Recommended model | Gemini 3.1 Flash Lite | Gemini 3 Flash | Claude Opus 4.6 |
## Provider setup
@@ -63,7 +63,7 @@ Mistral offers a generous free tier with access to powerful models.
### OpenAI
-OpenAI provides access to GPT models, including GPT-4o and GPT-4o-mini.
+OpenAI provides access to GPT models, including GPT-5.4 and GPT-5.4-mini.
@@ -79,7 +79,7 @@ OpenAI provides access to GPT models, including GPT-4o and GPT-4o-mini.
In Excel, open Cellm's ribbon menu, select the `openai` provider, click the provider icon, and paste your API key. Try a model like:
````mdx OpenAI example
- =PROMPTMODEL("openai/gpt-4o-mini", "Classify sentiment as positive, neutral, or negative", A1)
+ =PROMPTMODEL("openai/gpt-5.4-mini", "Classify sentiment as positive, neutral, or negative", A1)
````
@@ -106,7 +106,7 @@ Google Gemini offers powerful AI models with a generous free tier.
In Excel, open Cellm's ribbon menu, select the `gemini` provider, click the provider icon, and paste your API key. Try a model like:
````mdx Gemini example
- =PROMPTMODEL("gemini/gemini-2.5-flash", "Extract person names from text", A1)
+ =PROMPTMODEL("gemini/gemini-3-flash-preview", "Extract person names from text", A1)
````
@@ -136,7 +136,7 @@ Anthropic provides Claude models, known for their strong reasoning capabilities.
In Excel, open Cellm's ribbon menu, select the `anthropic` provider, click the provider icon, and paste your API key. Try a model like:
````mdx Claude example
- =PROMPTMODEL("anthropic/claude-sonnet-4.5", "Analyze customer feedback", A1)
+ =PROMPTMODEL("anthropic/claude-sonnet-4-6", "Analyze customer feedback", A1)
````
diff --git a/docs/models/local-models.mdx b/docs/models/local-models.mdx
index 31cb2fe..f5238a2 100644
--- a/docs/models/local-models.mdx
+++ b/docs/models/local-models.mdx
@@ -16,7 +16,7 @@ We can split local models into three tiers based on their size and capabilities,
| Speed | | | |
| Intelligence | | | |
| World Knowledge | | | |
-| Recommended model | Gemma 3 4B IT QAT | Mistral Small 3.2 | qwen3-30b-a3b-instruct-2507 |
+| Recommended model | Gemma 4 E4B | Gemma 4 26B | Gemma 4 31B |
You need a GPU for any of the medium or large models to be useful in practice. If you don't have a GPU, you can use [Hosted Models](/models/hosted-models) if small ones are insufficient.
@@ -38,29 +38,29 @@ You need to run a program on your computer that serves models to Cellm. We call
### Ollama
-To get started with Ollama, we recommend you try out the Gemma 3 4B IT QAT model, which is Cellm's default local model.
+To get started with Ollama, we recommend you try out the Gemma 4 E4B model, which is Cellm's default local model.
Download and install [Ollama](https://ollama.com/). Ollama will start after the install and automatically run whenever you start up your computer.
- Open Windows Terminal (open start menu, type `Windows Terminal`, and click `OK`), then run:
+ When you select an Ollama model in Cellm, it will prompt you to download it automatically. Alternatively, open Windows Terminal (open start menu, type `Windows Terminal`, and click `OK`), then run:
- ````bash Download Gemma 3 4B QAT
- ollama pull gemma3:4b-it-qat
+ ````bash Download Gemma 4 E4B
+ ollama pull gemma4:e4b
````
Wait for the download to finish.
- In Excel, select `ollama/gemma3:4b-it-qat` from the model dropdown menu, and type:
+ In Excel, select `ollama/gemma4:e4b` from the model dropdown menu, and type:
````mdx Test prompt
=PROMPT("Which model are you and who made you?")
````
- The model will tell you that it is called "Gemma" and made by Google DeepMind.
+ The model will tell you that it is called "Gemma 4" and made by Google DeepMind.
@@ -133,7 +133,7 @@ If you prefer to run models via docker, both Ollama and vLLM are packaged up wit
````
- Start Excel and select the `openaicompatible` provider from the model drop-down on Cellm's ribbon menu. Enter the model name you want to use, e.g., `gemma3:4b-it-qat`.
+ Start Excel and select the `openaicompatible` provider from the model drop-down on Cellm's ribbon menu. Enter the model name you want to use, e.g., `gemma4:e4b`.
Set the Base Address to `http://localhost:11434`.
diff --git a/docs/sdk-migration.md b/docs/sdk-migration.md
new file mode 100644
index 0000000..d7a669d
--- /dev/null
+++ b/docs/sdk-migration.md
@@ -0,0 +1,33 @@
+# SDK Migration: Anthropic, Mistral, and Gemini
+
+## Summary
+
+Three provider SDKs need replacing due to incompatibilities and maintainability concerns. Both community SDKs (`Anthropic.SDK`, `Mistral.SDK`) are maintained by the same author (tghamm) and have become inactive. The Gemini provider uses an OpenAI-compatible endpoint that doesn't fully support tool use schemas.
+
+## ~~Anthropic.SDK (5.10.0)~~ ✅ DONE
+
+Migrated to official `Anthropic` SDK (v12.11.0). The community `Anthropic.SDK` was incompatible with MEAI 10.4.x (`MissingMethodException` on `HostedMcpServerTool.AuthorizationToken`). The official SDK has native IChatClient support and accepts custom HttpClient, so the resilient HttpClient pipeline is preserved. Also fixed a bug where the entitlement check referenced `EnableAzureProvider` instead of `EnableAnthropicProvider`. Removed the `RateLimitsExceeded` exception from `RateLimiterHelpers` (was Anthropic.SDK-specific; 429 status is already handled by `retryableStatusCodes`). All 4 integration tests pass.
+
+## ~~Mistral.SDK (2.3.1)~~ ✅ DONE
+
+Migrated both `AddMistralChatClient()` and `AddCellmChatClient()` to use `OpenAIClient` with custom endpoint, same pattern as DeepSeek and OpenRouter. Removed `Mistral.SDK` dependency entirely. All 4 Mistral integration tests pass (basic prompt, file reader, file search, Playwright MCP).
+
+**Known issue: Magistral thinking models.** The OpenAI .NET SDK cannot deserialize Magistral's `thinking` content part type (`ArgumentOutOfRangeException: Unknown ChatMessageContentPartKind value: thinking`). The failure occurs at the deserialization level before `MistralThinkingBehavior` can process the response. This is a limitation of using the OpenAI SDK with Mistral's extended thinking format. Magistral models (`magistral-small-2509`, `magistral-medium-2509`) are currently broken.
+
+## ~~Gemini (OpenAI-compatible endpoint)~~ ✅ DONE
+
+Migrated to official `Google.GenAI` SDK (v1.6.1). The OpenAI-compatible endpoint rejected `strict: true` / `additionalProperties: false` in tool schemas. The native SDK handles tool schemas correctly. All 4 integration tests pass (basic prompt, file reader, file search, Playwright MCP).
+
+**Tradeoff:** Google.GenAI does not support custom HttpClient injection, so HTTP-level retry/timeout from the resilience pipeline is not available for Gemini. Rate limiting (application layer) is unaffected. `GeminiTemperatureBehavior` (0-1 → 0-2 scaling) is still needed — the native SDK passes temperature as-is.
+
+## Additional considerations
+
+When switching SDKs, provider-specific behaviors and other code may need updating. Examples include but are not limited to:
+
+- `GeminiTemperatureBehavior` — temperature scaling may differ with native SDK
+- `AdditionalPropertiesBehavior` — provider-specific additional properties format may change
+- `ProviderRequestHandler.UseJsonSchemaResponseFormat()` — structured output support flags
+- Provider configuration classes (`SupportsJsonSchemaResponses`, `SupportsStructuredOutputWithTools`) — verify accuracy with new SDKs
+- Resilient HTTP client integration — new SDKs may handle HTTP clients differently
+
+A thorough review of all provider-specific code paths is needed during migration.
diff --git a/docs/usage/writing-prompts.mdx b/docs/usage/writing-prompts.mdx
index 25872cc..207ee7a 100644
--- a/docs/usage/writing-prompts.mdx
+++ b/docs/usage/writing-prompts.mdx
@@ -104,7 +104,7 @@ For advanced workflows, you might want to use different AI models for different
The first argument consists of a provider and a model name separated by a forward slash (`/`). For example, if you want to use OpenAI's cheapest model in a particular cell, you can write:
````mdx Specify model
-=PROMPTMODEL("openai/gpt-4o-mini", "Rate sentiment as positive, neutral, or negative", A1)
+=PROMPTMODEL("openai/gpt-5.4-mini", "Rate sentiment as positive, neutral, or negative", A1)
````
This is useful when you want to use a strong model by default but offload simple tasks to cheaper models.
diff --git a/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.Designer.cs b/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.Designer.cs
new file mode 100644
index 0000000..1d9cc68
--- /dev/null
+++ b/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.Designer.cs
@@ -0,0 +1,71 @@
+namespace Cellm.AddIn.UserInterface.Forms;
+
+partial class OllamaModelPullForm
+{
+ private System.Windows.Forms.Label statusLabel;
+ private System.Windows.Forms.ProgressBar progressBar;
+ private System.Windows.Forms.Button cancelButton;
+
+ #region Windows Form Designer generated code
+
+ private void InitializeComponent()
+ {
+ statusLabel = new Label();
+ progressBar = new ProgressBar();
+ cancelButton = new Button();
+ SuspendLayout();
+ //
+ // statusLabel
+ //
+ statusLabel.AutoSize = true;
+ statusLabel.Location = new Point(14, 15);
+ statusLabel.Margin = new Padding(4, 0, 4, 0);
+ statusLabel.Name = "statusLabel";
+ statusLabel.Size = new Size(73, 15);
+ statusLabel.TabIndex = 0;
+ statusLabel.Text = "Downloading...";
+ //
+ // progressBar
+ //
+ progressBar.Anchor = AnchorStyles.Top | AnchorStyles.Left | AnchorStyles.Right;
+ progressBar.Location = new Point(14, 40);
+ progressBar.Margin = new Padding(4, 3, 4, 3);
+ progressBar.Name = "progressBar";
+ progressBar.Size = new Size(355, 23);
+ progressBar.TabIndex = 1;
+ //
+ // cancelButton
+ //
+ cancelButton.Anchor = AnchorStyles.Bottom | AnchorStyles.Right;
+ cancelButton.DialogResult = DialogResult.Cancel;
+ cancelButton.Location = new Point(281, 76);
+ cancelButton.Margin = new Padding(4, 3, 4, 3);
+ cancelButton.Name = "cancelButton";
+ cancelButton.Size = new Size(88, 27);
+ cancelButton.TabIndex = 2;
+ cancelButton.Text = "Cancel";
+ cancelButton.UseVisualStyleBackColor = true;
+ cancelButton.Click += cancelButton_Click;
+ //
+ // OllamaModelPullForm
+ //
+ AutoScaleDimensions = new SizeF(7F, 15F);
+ AutoScaleMode = AutoScaleMode.Font;
+ CancelButton = cancelButton;
+ ClientSize = new Size(383, 116);
+ Controls.Add(cancelButton);
+ Controls.Add(progressBar);
+ Controls.Add(statusLabel);
+ FormBorderStyle = FormBorderStyle.FixedDialog;
+ Margin = new Padding(4, 3, 4, 3);
+ MaximizeBox = false;
+ MinimizeBox = false;
+ Name = "OllamaModelPullForm";
+ ShowInTaskbar = false;
+ StartPosition = FormStartPosition.CenterParent;
+ Text = "Downloading Model";
+ ResumeLayout(false);
+ PerformLayout();
+ }
+ #endregion
+}
diff --git a/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.cs b/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.cs
new file mode 100644
index 0000000..f31d5a0
--- /dev/null
+++ b/src/Cellm/AddIn/UserInterface/Forms/OllamaModelPullForm.cs
@@ -0,0 +1,80 @@
+using OllamaSharp;
+using OllamaSharp.Models;
+
+namespace Cellm.AddIn.UserInterface.Forms;
+
+public partial class OllamaModelPullForm : Form
+{
+ private readonly OllamaApiClient _client;
+ private readonly string _modelName;
+ private CancellationTokenSource? _cts;
+
+ public bool PullSucceeded { get; private set; }
+
+ public OllamaModelPullForm(OllamaApiClient client, string modelName)
+ {
+ InitializeComponent();
+
+ _client = client;
+ _modelName = modelName;
+
+ statusLabel.Text = $"Downloading {modelName}...";
+ }
+
+ protected override async void OnShown(EventArgs e)
+ {
+ base.OnShown(e);
+
+ _cts = new CancellationTokenSource();
+
+ try
+ {
+ var request = new PullModelRequest { Model = _modelName };
+
+ await foreach (var response in _client.PullModelAsync(request, _cts.Token))
+ {
+ if (response?.Percent > 0)
+ {
+ progressBar.Value = Math.Min((int)response.Percent, 100);
+ statusLabel.Text = $"Downloading {_modelName}... {(int)response.Percent}%";
+ }
+ else
+ {
+ statusLabel.Text = $"Downloading {_modelName}...";
+ }
+ }
+
+ PullSucceeded = true;
+ DialogResult = DialogResult.OK;
+ Close();
+ }
+ catch (OperationCanceledException)
+ {
+ PullSucceeded = false;
+ DialogResult = DialogResult.Cancel;
+ Close();
+ }
+ catch (Exception ex)
+ {
+ PullSucceeded = false;
+ MessageBox.Show($"Failed to download model: {ex.Message}", "Cellm", MessageBoxButtons.OK, MessageBoxIcon.Error);
+ DialogResult = DialogResult.Cancel;
+ Close();
+ }
+ }
+
+ private void cancelButton_Click(object sender, EventArgs e)
+ {
+ _cts?.Cancel();
+ }
+
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ _cts?.Dispose();
+ }
+
+ base.Dispose(disposing);
+ }
+}
diff --git a/src/Cellm/AddIn/UserInterface/Ribbon/RibbonModelGroup.cs b/src/Cellm/AddIn/UserInterface/Ribbon/RibbonModelGroup.cs
index d157224..6a9c4d4 100644
--- a/src/Cellm/AddIn/UserInterface/Ribbon/RibbonModelGroup.cs
+++ b/src/Cellm/AddIn/UserInterface/Ribbon/RibbonModelGroup.cs
@@ -21,6 +21,7 @@
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
+using OllamaSharp;
namespace Cellm.AddIn.UserInterface.Ribbon;
@@ -292,6 +293,47 @@ public void OnModelComboBoxChange(IRibbonControl control, string text)
{
_logger.LogError("ERROR updating DefaultModel setting '{configKey}' to '{text}': {ex.Message}", configKey, text, ex.Message);
}
+
+ if (provider == Provider.Ollama)
+ {
+ CheckAndPullOllamaModel(text);
+ }
+ }
+
+ private void CheckAndPullOllamaModel(string modelName)
+ {
+ try
+ {
+ var ollamaConfiguration = CellmAddIn.Services.GetRequiredService<IOptionsMonitor<OllamaConfiguration>>();
+ var httpClient = new HttpClient { BaseAddress = ollamaConfiguration.CurrentValue.BaseAddress };
+ var client = new OllamaApiClient(httpClient, modelName);
+
+ var models = Task.Run(() => client.ListLocalModelsAsync()).GetAwaiter().GetResult();
+ var modelExists = models.Any(m => m.Name == modelName || m.Name == $"{modelName}:latest");
+
+ if (modelExists)
+ {
+ return;
+ }
+
+ var result = MessageBox.Show(
+ $"{modelName} was not found on your machine. Do you want to download it?",
+ "Cellm",
+ MessageBoxButtons.OKCancel,
+ MessageBoxIcon.Question);
+
+ if (result != DialogResult.OK)
+ {
+ return;
+ }
+
+ using var pullForm = new OllamaModelPullForm(client, modelName);
+ pullForm.ShowDialog();
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError("Error checking Ollama model '{modelName}': {message}", modelName, ex.Message);
+ }
}
///