@@ -5,25 +5,89 @@ import {
   assertStreamSuccess,
   extractTextFromEvents,
 } from "./helpers";
+import { spawn } from "child_process";
 
 // Skip all tests if TEST_INTEGRATION is not set
 const describeIntegration = shouldRunIntegrationTests() ? describe : describe.skip;
 
 // Ollama doesn't require API keys - it's a local service
-// Tests require Ollama to be running with the gpt-oss:20b model installed
+// Tests require Ollama to be running and will pull models idempotently
+
+const OLLAMA_MODEL = "gpt-oss:20b";
+
+/**
+ * Ensure the Ollama model is available (idempotent).
+ * Checks whether the model exists and pulls it if not.
+ * Multiple tests can call this in parallel - Ollama handles deduplication.
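+ * @param model - Ollama model tag to check for and pull, e.g. "gpt-oss:20b"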
+ */
+async function ensureOllamaModel(model: string): Promise<void> {
+  return new Promise((resolve, reject) => {
+    // Check if model exists: ollama list | grep <model>
+    const checkProcess = spawn("ollama", ["list"]);
+    let stdout = "";
+    let stderr = "";
+
+    checkProcess.stdout.on("data", (data) => {
+      stdout += data.toString();
+    });
+
+    checkProcess.stderr.on("data", (data) => {
+      stderr += data.toString();
+    });
+
+    checkProcess.on("close", (code) => {
+      if (code !== 0) {
+        return reject(new Error(`Failed to check Ollama models: ${stderr}`));
+      }
+
+      // Check if model is in the list
+      const modelLines = stdout.split("\n");
+      const modelExists = modelLines.some((line) => line.includes(model));
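+      // A substring match is enough here: `ollama list` prints model names with their tags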
+
+      if (modelExists) {
+        console.log(`✓ Ollama model ${model} already available`);
+        return resolve();
+      }
+
+      // Model doesn't exist, pull it
+      console.log(`Pulling Ollama model ${model}...`);
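+      // stdin is ignored; stdout/stderr are inherited so pull progress streams to the test output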
+      const pullProcess = spawn("ollama", ["pull", model], {
+        stdio: ["ignore", "inherit", "inherit"],
+      });
+
+      const timeout = setTimeout(() => {
+        pullProcess.kill();
+        reject(new Error(`Timeout pulling Ollama model ${model}`));
+      }, 120000); // 2 minute timeout for model pull
+
+      pullProcess.on("close", (pullCode) => {
+        clearTimeout(timeout);
+        if (pullCode !== 0) {
+          reject(new Error(`Failed to pull Ollama model ${model}`));
+        } else {
+          console.log(`✓ Ollama model ${model} pulled successfully`);
+          resolve();
+        }
+      });
+    });
+  });
+}
 
 describeIntegration("IpcMain Ollama integration tests", () => {
   // Enable retries in CI for potential network flakiness with Ollama
   if (process.env.CI && typeof jest !== "undefined" && jest.retryTimes) {
     jest.retryTimes(3, { logErrorsBeforeRetry: true });
   }
 
-  // Load tokenizer modules once before all tests (takes ~14s)
-  // This ensures accurate token counts for API calls without timing out individual tests
+  // Load tokenizer modules and ensure the model is available before all tests
   beforeAll(async () => {
+    // Load tokenizers (takes ~14s)
     const { loadTokenizerModules } = await import("../../src/utils/main/tokenizer");
     await loadTokenizerModules();
-  }, 30000); // 30s timeout for tokenizer loading
+
+    // Ensure the Ollama model is available (idempotent - fast if cached)
+    await ensureOllamaModel(OLLAMA_MODEL);
+  }, 150000); // 150s timeout for tokenizer loading + potential model pull
 
   test("should successfully send message to Ollama and receive response", async () => {
     // Setup test environment
@@ -35,7 +99,7 @@ describeIntegration("IpcMain Ollama integration tests", () => {
       workspaceId,
       "Say 'hello' and nothing else",
       "ollama",
-      "gpt-oss:20b"
+      OLLAMA_MODEL
     );
 
     // Verify the IPC call succeeded
@@ -69,7 +133,7 @@ describeIntegration("IpcMain Ollama integration tests", () => {
       workspaceId,
       "What is the current date and time? Use the bash tool to find out.",
       "ollama",
-      "gpt-oss:20b"
+      OLLAMA_MODEL
     );
 
     expect(result.success).toBe(true);
@@ -108,7 +172,7 @@ describeIntegration("IpcMain Ollama integration tests", () => {
       workspaceId,
       "Read the README.md file and tell me what the first heading says.",
       "ollama",
-      "gpt-oss:20b"
+      OLLAMA_MODEL
     );
 
     expect(result.success).toBe(true);
@@ -146,7 +210,7 @@ describeIntegration("IpcMain Ollama integration tests", () => {
       workspaceId,
       "This should fail",
       "ollama",
-      "gpt-oss:20b",
+      OLLAMA_MODEL,
       {
         providerOptions: {
           ollama: {},