diff --git a/src/app/settings/llm/Ollama/index.tsx b/src/app/settings/llm/Ollama/index.tsx
index 513f3949be1f..cb12b4e2ba89 100644
--- a/src/app/settings/llm/Ollama/index.tsx
+++ b/src/app/settings/llm/Ollama/index.tsx
@@ -18,12 +18,12 @@ const OllamaProvider = memo(() => {
label: t('llm.checker.title'),
minWidth: undefined,
}}
+ modelList={{ showModelFetcher: true }}
provider={ModelProvider.Ollama}
showApiKey={false}
showBrowserRequest
showEndpoint
title={}
- // modelList={{ showModelFetcher: true }}
/>
);
});
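// Editor note (sketch, not part of the diff): the change above replaces the
// commented-out prop with a live `modelList={{ showModelFetcher: true }}`, so the
// Ollama settings panel now renders the model-fetcher action. A hypothetical
// sketch of the prop shape consumed by ProviderConfig, inferred from the usage
// above; any field not visible in the diff is an assumption:
interface ProviderConfigProps {
  modelList?: {
    showModelFetcher?: boolean; // when true, show an action that pulls the live model list
  };
  provider: string; // e.g. ModelProvider.Ollama
  showApiKey?: boolean; // hidden for Ollama, which needs no API key
  showBrowserRequest?: boolean;
  showEndpoint?: boolean;
}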
diff --git a/src/libs/agent-runtime/ollama/index.ts b/src/libs/agent-runtime/ollama/index.ts
index a160f418f0f4..b36ec48f9ba3 100644
--- a/src/libs/agent-runtime/ollama/index.ts
+++ b/src/libs/agent-runtime/ollama/index.ts
@@ -4,6 +4,7 @@ import { ClientOptions } from 'openai';
import { OpenAIChatMessage } from '@/libs/agent-runtime';
import { OllamaStream } from '@/libs/agent-runtime/ollama/stream';
+import { ChatModelCard } from '@/types/llm';

import { LobeRuntimeAI } from '../BaseAI';
import { AgentRuntimeErrorType } from '../error';
@@ -64,12 +65,12 @@ export class LobeOllamaAI implements LobeRuntimeAI {
}
}
- // async models(): Promise<ChatModelCard[]> {
- // const list = await this.client.list();
- // return list.models.map((model) => ({
- // id: model.name,
- // }));
- // }
+ async models(): Promise<ChatModelCard[]> {
+   const list = await this.client.list();
+   return list.models.map((model) => ({
+     id: model.name,
+   }));
+ }

private buildOllamaMessages(messages: OpenAIChatMessage[]) {
return messages.map((message) => this.convertContentToOllamaMessage(message));
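// Editor note (sketch, not part of the diff): the restored models() maps the
// Ollama client's list() response onto ChatModelCard entries. Below is a hedged,
// standalone equivalent that talks to Ollama's REST endpoint GET /api/tags
// (which the `ollama` client's list() wraps); the default base URL is an
// assumption, and only the `name` field of each entry is relied on:
async function listOllamaModels(
  baseURL = 'http://127.0.0.1:11434',
): Promise<{ id: string }[]> {
  const res = await fetch(`${baseURL}/api/tags`);
  if (!res.ok) throw new Error(`Ollama model list failed: HTTP ${res.status}`);
  // /api/tags responds with { models: [{ name, modified_at, size, ... }] }
  const { models } = (await res.json()) as { models: { name: string }[] };
  // Mirror the diff: each card's id is the model name, e.g. 'llama2:latest'.
  return models.map((model) => ({ id: model.name }));
}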
diff --git a/src/services/models.ts b/src/services/models.ts
index 39c19684bf74..7ab495820cd2 100644
--- a/src/services/models.ts
+++ b/src/services/models.ts
@@ -1,7 +1,10 @@
import { createHeaderWithAuth } from '@/services/_auth';
+import { useGlobalStore } from '@/store/global';
+import { modelConfigSelectors } from '@/store/global/selectors';
import { ChatModelCard } from '@/types/llm';

import { API_ENDPOINTS } from './_url';
+import { initializeWithClientStore } from './chat';

class ModelsService {
getChatModels = async (provider: string): Promise<ChatModelCard[] | undefined> => {
@@ -10,6 +13,17 @@ class ModelsService {
provider,
});
try {
+ /**
+  * Use browser agent runtime
+  */
+ const enableFetchOnClient = modelConfigSelectors.isProviderFetchOnClient(provider)(
+   useGlobalStore.getState(),
+ );
+ if (enableFetchOnClient) {
+   const agentRuntime = await initializeWithClientStore(provider, {});
+   return agentRuntime.models();
+ }
+
const res = await fetch(API_ENDPOINTS.chatModels(provider), { headers });
if (!res.ok) return;
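// Editor note (sketch, not part of the diff): with the branch above, getChatModels
// consults the global store's "fetch on client" flag for the provider; when set it
// builds a browser-side runtime via initializeWithClientStore and returns
// agentRuntime.models() directly, bypassing the server route behind
// API_ENDPOINTS.chatModels. A hedged usage sketch; `modelsService` as the exported
// instance name is an assumption:
import { modelsService } from '@/services/models';

const cards = await modelsService.getChatModels('ollama');
// Hypothetical result when fetching on the client from a local Ollama:
// [{ id: 'llama2:latest' }, { id: 'mistral:latest' }]
console.log(cards);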