2 changes: 1 addition & 1 deletion binary/src/IpcMessenger.ts
@@ -88,7 +88,7 @@ class IPCMessengerBase<
truncatedLine =
line.substring(0, 100) + "..." + line.substring(line.length - 100);
}
console.error("Error parsing line: ", truncatedLine, e);
console.error("Error parsing JSON from line: ", truncatedLine);
@cubic-dev-ai bot (Contributor) commented on Nov 22, 2025

Dropping the caught error from the log removes stack/context information, making JSON parse failures hard to diagnose. Please log the error object along with the truncated line.

Suggested change
- console.error("Error parsing JSON from line: ", truncatedLine);
+ console.error("Error parsing JSON from line: ", truncatedLine, e);

return;
}
}
2 changes: 1 addition & 1 deletion binary/src/TcpMessenger.ts
@@ -124,7 +124,7 @@ export class TcpMessenger<
truncatedLine =
line.substring(0, 100) + "..." + line.substring(line.length - 100);
}
console.error("Error parsing line: ", truncatedLine, e);
console.error("Error parsing JSON from line: ", truncatedLine);
@cubic-dev-ai bot (Contributor) commented on Nov 22, 2025

Dropping the caught error object from the console.error call removes the stack trace and error message, making JSON parsing failures much harder to debug.

Suggested change
- console.error("Error parsing JSON from line: ", truncatedLine);
+ console.error("Error parsing JSON from line: ", truncatedLine, e);

return;
}
}
113 changes: 70 additions & 43 deletions core/llm/llms/Ollama.ts
@@ -161,47 +161,61 @@ class Ollama extends BaseLLM implements ModelInstaller {
private static modelsBeingInstalledMutex = new Mutex();

private fimSupported: boolean = false;
private modelInfoPromise: Promise<void> | undefined;

constructor(options: LLMOptions) {
super(options);
}

if (options.model === "AUTODETECT") {
/**
* Lazily fetch model info from Ollama's api/show endpoint.
* This is called on first use rather than in the constructor to avoid
* making HTTP requests when models are just being instantiated for config serialization.
*/
private async ensureModelInfo(): Promise<void> {
@cubic-dev-ai bot (Contributor) commented on Nov 22, 2025

Lazy-loading the model info leaves supportsFim() false for the first request, so the initial autocomplete call always skips the FIM path and ignores the suffix context for FIM-capable models.

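One possible way to address this, sketched under the assumption that the capability check can become asynchronous (the supportsFimAsync name is illustrative, not an existing method on the interface):

// Sketch only: await the lazy model-info fetch before reporting FIM support,
// so the first autocomplete request sees the correct capability.
async supportsFimAsync(): Promise<boolean> {
  await this.ensureModelInfo();
  return this.fimSupported;
}

Alternatively, ensureModelInfo() could be started (without awaiting) in the constructor so the flag is usually populated before the first completion request; either option keeps the lazy fetch while closing the first-request gap.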

if (this.model === "AUTODETECT") {
return;
}
const headers: Record<string, string> = {
"Content-Type": "application/json",
};

if (this.apiKey) {
headers.Authorization = `Bearer ${this.apiKey}`;
// If already fetched or in progress, reuse the promise
if (this.modelInfoPromise) {
return this.modelInfoPromise;
}

this.fetch(this.getEndpoint("api/show"), {
method: "POST",
headers: headers,
body: JSON.stringify({ name: this._getModel() }),
})
.then(async (response) => {
this.modelInfoPromise = (async () => {
const headers: Record<string, string> = {
"Content-Type": "application/json",
};

if (this.apiKey) {
headers.Authorization = `Bearer ${this.apiKey}`;
}

try {
const response = await this.fetch(this.getEndpoint("api/show"), {
method: "POST",
headers: headers,
body: JSON.stringify({ name: this._getModel() }),
});

if (response?.status !== 200) {
// console.warn(
// "Error calling Ollama /api/show endpoint: ",
// await response.text(),
// );
return;
}

const body = await response.json();
if (body.parameters) {
const params = [];
for (const line of body.parameters.split("\n")) {
let parts = line.match(/^(\S+)\s+((?:".*")|\S+)$/);
if (parts.length < 2) {
const parts = line.match(/^(\S+)\s+((?:".*")|\S+)$/);
if (!parts || parts.length < 2) {
continue;
}
let key = parts[1];
let value = parts[2];
const key = parts[1];
const value = parts[2];
switch (key) {
case "num_ctx":
this._contextLength =
options.contextLength ?? Number.parseInt(value);
if (!this._contextLength) {
this._contextLength = Number.parseInt(value);
}
break;
case "stop":
if (!this.completionOptions.stop) {
@@ -210,9 +224,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
try {
this.completionOptions.stop.push(JSON.parse(value));
} catch (e) {
console.warn(
`Error parsing stop parameter value "{value}: ${e}`,
);
// Ignore parse errors
}
break;
default:
@@ -227,10 +239,12 @@ class Ollama extends BaseLLM implements ModelInstaller {
* it's a good indication the model supports FIM.
*/
this.fimSupported = !!body?.template?.includes(".Suffix");
})
.catch((e) => {
// console.warn("Error calling the Ollama /api/show endpoint: ", e);
});
} catch (e) {
// Silently fail - model info is optional
}
})();

return this.modelInfoPromise;
}

// Map of "continue model name" to Ollama actual model name
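For readers unfamiliar with the pattern used in ensureModelInfo above, here is a minimal, generic sketch of lazy async initialization with a memoized promise; the names are illustrative and not taken from this codebase.

// Minimal sketch: the first caller starts the work; every caller, including
// concurrent ones, gets the same cached Promise, so the fetch runs at most once.
let initPromise: Promise<void> | undefined;

function ensureInitialized(doInit: () => Promise<void>): Promise<void> {
  if (!initPromise) {
    initPromise = doInit();
  }
  return initPromise;
}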
@@ -369,6 +383,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
signal: AbortSignal,
options: CompletionOptions,
): AsyncGenerator<string> {
await this.ensureModelInfo();
const headers: Record<string, string> = {
"Content-Type": "application/json",
};
@@ -414,6 +429,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
signal: AbortSignal,
options: CompletionOptions,
): AsyncGenerator<ChatMessage> {
await this.ensureModelInfo();
const ollamaMessages = messages.map(this._convertToOllamaMessage);
const chatOptions: OllamaChatOptions = {
model: this._getModel(),
@@ -565,6 +581,8 @@ class Ollama extends BaseLLM implements ModelInstaller {
}

supportsFim(): boolean {
// Note: this returns false until model info is fetched
// Could be made async if needed
return this.fimSupported;
}

@@ -574,6 +592,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
signal: AbortSignal,
options: CompletionOptions,
): AsyncGenerator<string> {
await this.ensureModelInfo();
const headers: Record<string, string> = {
"Content-Type": "application/json",
};
@@ -622,21 +641,29 @@ class Ollama extends BaseLLM implements ModelInstaller {
if (this.apiKey) {
headers.Authorization = `Bearer ${this.apiKey}`;
}
const response = await this.fetch(
// localhost was causing fetch failed in pkg binary only for this Ollama endpoint
this.getEndpoint("api/tags"),
{

try {
const response = await this.fetch(this.getEndpoint("api/tags"), {
method: "GET",
headers: headers,
},
);
const data = await response.json();
if (response.ok) {
return data.models.map((model: any) => model.name);
} else {
throw new Error(
"Failed to list Ollama models. Make sure Ollama is running.",
);
});
const data = await response.json();
if (response.ok) {
return data.models.map((model: any) => model.name);
} else {
console.warn(
`Ollama /api/tags returned status ${response.status}:`,
data,
);
throw new Error(
"Failed to list Ollama models. Make sure Ollama is running.",
);
}
} catch (error) {
console.warn("Failed to list Ollama models:", error);
// If Ollama is not running or returns an error, return an empty list
// This allows the application to continue without blocking on Ollama
return [];
}
}

6 changes: 3 additions & 3 deletions gui/src/components/OSRContextMenu.tsx
@@ -1,6 +1,6 @@
import React, { useContext, useEffect, useRef, useState } from "react";
import useIsOSREnabled from "../hooks/useIsOSREnabled";
import { IdeMessengerContext } from "../context/IdeMessenger";
import useIsOSREnabled from "../hooks/useIsOSREnabled";
import { getPlatform } from "../util";

interface Position {
@@ -140,7 +140,7 @@ const OSRContextMenu = () => {
}

setPosition(null);
if (isOSREnabled && platform.current !== "mac") {
if (isOSREnabled) {
document.addEventListener("mousedown", clickHandler);
document.addEventListener("mouseleave", leaveWindowHandler);
document.addEventListener("contextmenu", contextMenuHandler);
@@ -153,7 +153,7 @@ const OSRContextMenu = () => {
};
}, [isOSREnabled]);

if (platform.current === "mac" || !isOSREnabled || !position) {
if (!isOSREnabled || !position) {
return null;
}
return (
2 changes: 1 addition & 1 deletion gui/src/context/IdeMessenger.tsx
@@ -83,7 +83,7 @@ export class IdeMessenger implements IIdeMessenger {
if (typeof vscode === "undefined") {
if (isJetBrains()) {
if (window.postIntellijMessage === undefined) {
- console.log(
+ console.debug(
"Unable to send message: postIntellijMessage is undefined. ",
messageType,
data,