Commit 16a1f8d

feat(tests): add LLM API tests and mock responses for Ollama

Parent: 4847c2c

4 files changed: +53, -8 lines

tests/e2e/llm-api.test.ts

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+import { ollamaChatResponse } from './mock-responses/ollama/chat-response'
+import { ollamaPsResponse } from './mock-responses/ollama/ps'
+import { ollamaShowResponse } from './mock-responses/ollama/show'
+import { ollamaTagsResponse } from './mock-responses/ollama/tags'
+import { expect, test } from './utils'
+
+test('navigator llm api test', async ({ page, extension, context }) => {
+  context.route('http://localhost:11434/', async (route) => {
+    await route.fulfill({ body: 'Ollama is running', contentType: 'text/plain' })
+  })
+  context.route('http://localhost:11434/api/ps', async (route) => {
+    await route.fulfill({ body: JSON.stringify(ollamaPsResponse), contentType: 'application/json' })
+  })
+  context.route('http://localhost:11434/api/tags', async (route) => {
+    await route.fulfill({ body: JSON.stringify(ollamaTagsResponse), contentType: 'application/json' })
+  })
+  context.route('http://localhost:11434/api/show', async (route) => {
+    await route.fulfill({ body: JSON.stringify(ollamaShowResponse), contentType: 'application/json' })
+  })
+  context.route('http://localhost:11434/api/chat', async (route) => {
+    await route.fulfill({ body: JSON.stringify(ollamaChatResponse), contentType: 'application/json' })
+  })
+  await extension.setStorageItem('llm.model', 'qwen3:4b')
+  await page.goto('https://example.com')
+  const r = await page.evaluate<string>(async () => {
+    // @ts-expect-error - navigator.llm is injected by the extension
+    const response = await navigator.llm.responses.create({
+      prompt: 'Explain quantum computing in simple terms',
+    })
+    return response.text
+  })
+  expect(r).toBe(ollamaChatResponse.message.content)
+})
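
Note: the test never talks to a real Ollama server; every endpoint the extension calls is stubbed through Playwright's context.route. The `extension` fixture is imported from `./utils`, which this commit does not touch. Below is a minimal sketch of what its setStorageItem helper might look like, assuming an MV3 extension whose background service worker runs in the persistent test context — an assumption for illustration, not the repo's actual implementation:

// Hypothetical sketch of the `extension` fixture from tests/e2e/utils —
// not the actual implementation, which is outside this diff.
import { test as base, expect } from '@playwright/test'

type Extension = {
  setStorageItem: (key: string, value: unknown) => Promise<void>
}

export const test = base.extend<{ extension: Extension }>({
  extension: async ({ context }, use) => {
    // Extension e2e tests run in a persistent context where the MV3
    // background service worker is (or will shortly be) registered.
    const worker =
      context.serviceWorkers()[0] ?? (await context.waitForEvent('serviceworker'))
    await use({
      setStorageItem: async (key, value) => {
        // Write through chrome.storage.local in the worker's own context,
        // which works no matter which page is currently open.
        await worker.evaluate(([k, v]) => {
          // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types
          return chrome.storage.local.set({ [k]: v })
        }, [key, value] as const)
      },
    })
  },
})

export { expect }

Going through the service worker rather than the current tab would explain why the helper can set 'llm.model' before page.goto('https://example.com'), where chrome.storage is unavailable.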
tests/e2e/mock-responses/ollama/chat-response.ts

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+export const ollamaChatResponse = {
+  model: 'qwen3:latest',
+  created_at: '2025-09-17T05:21:21.503148Z',
+  message: {
+    role: 'assistant',
+    content: 'Hello! 😊 How can I assist you today? Let me know if you need help with anything!',
+    thinking: 'Okay, the user said "hello /think". Let me start by acknowledging their greeting. I should respond in a friendly and welcoming manner. Maybe add an emoji to keep it light. Then, I can ask how I can assist them today. Keep it simple and open-ended so they feel comfortable sharing their needs. Let me make sure the response is polite and approachable.\n',
+  },
+  done_reason: 'stop',
+  done: true,
+  total_duration: 2509131125,
+  load_duration: 55912542,
+  prompt_eval_count: 11,
+  prompt_eval_duration: 247918834,
+  eval_count: 101,
+  eval_duration: 2204852791,
+}
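
The assertion in llm-api.test.ts ties this mock to the public API: response.text must equal message.content. A hypothetical sketch of how the injected navigator.llm bridge might produce that mapping from a non-streaming Ollama /api/chat call — createResponse is an invented name, and the real adapter inside the extension is not part of this commit:

// Hypothetical sketch of the navigator.llm → Ollama mapping; the extension's
// real adapter may differ in shape and error handling.
async function createResponse(prompt: string, model: string): Promise<{ text: string }> {
  const res = await fetch('http://localhost:11434/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model, // e.g. the 'llm.model' storage item, set to 'qwen3:4b' in the test
      messages: [{ role: 'user', content: prompt }],
      stream: false, // one JSON body, the shape ollamaChatResponse mocks above
    }),
  })
  const data = await res.json()
  // Surfacing message.content as `text` is what makes
  // expect(r).toBe(ollamaChatResponse.message.content) pass.
  return { text: data.message.content }
}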
File renamed without changes.

tests/e2e/onboarding.test.ts

Lines changed: 3 additions & 8 deletions
@@ -3,13 +3,10 @@ import { ollamaShowResponse } from './mock-responses/ollama/show'
 import { ollamaTagsEmptyResponse } from './mock-responses/ollama/tags'
 import { expect, test } from './utils'
 
-test('show download model tutorial if ollama/lm studio is running', async ({ page, extensionId, context }) => {
+test('show download model tutorial if ollama/lm studio is running', async ({ page, extensionId, context, extension }) => {
   context.route('http://localhost:11434/', async (route) => {
     await route.fulfill({ body: 'Ollama is running', contentType: 'text/plain' })
   })
-  context.route(/http:\/\/localhost:1234/, async (route) => {
-    await route.abort('connectionfailed')
-  })
   context.route(/\/api\/ps/, async (route) => {
     await route.fulfill({ body: JSON.stringify(ollamaPsEmptyResponse), contentType: 'application/json' })
   })
@@ -20,8 +17,7 @@ test('show download model tutorial if ollama/lm studio is running', async ({ pag
     await route.fulfill({ body: JSON.stringify(ollamaShowResponse), contentType: 'application/json' })
   })
   await page.goto('chrome-extension://' + extensionId + '/sidepanel.html')
-  // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types
-  await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' }))
+  await extension.setStorageItem('locale.current', 'en')
   await expect(page.getByText('Download a model to begin').first()).toBeVisible({ timeout: 15000 })
 })
 
@@ -31,7 +27,6 @@ test('show startup tutorial if ollama is not running', async ({ page, extensionI
   })
   await extension.setStorageItem('llm.backends.lmStudio.baseUrl', 'ws://localhost:12345') // set to a non-existing ws url to avoid lm studio connection
   await page.goto('chrome-extension://' + extensionId + '/sidepanel.html')
-  // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types
-  await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' }))
+  await extension.setStorageItem('locale.current', 'en')
   await expect(page.getByText('How do you want to run AI locally').first()).toBeVisible({ timeout: 15000 })
 })
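
For reference, ollamaPsEmptyResponse and ollamaTagsEmptyResponse come from mock files this commit does not modify. Since Ollama's /api/tags (downloaded models) and /api/ps (running models) both return a models array, the empty mocks are presumably just empty lists; a hypothetical sketch:

// Hypothetical shapes — the actual mock files are unchanged by this commit.
// Both endpoints return { models: [...] } in Ollama's API, so "no model
// downloaded / none running" is simply an empty array.
export const ollamaTagsEmptyResponse = { models: [] }
export const ollamaPsEmptyResponse = { models: [] }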
