@@ -5,6 +5,7 @@ import { getUserConfig } from '@/utils/user-config'
 
 import { ModelNotFoundError } from '../error'
 import { makeCustomFetch } from '../fetch'
+import logger from '../logger'
 import { loadModel as loadLMStudioModel } from './lm-studio'
 import { middlewares } from './middlewares'
 import { checkModelSupportThinking } from './ollama'
@@ -16,18 +17,15 @@ import { isToggleableThinkingModel } from './thinking-models'
 import { getWebLLMEngine, WebLLMSupportedModel } from './web-llm'
 
 export async function getModelUserConfig(overrides?: { model?: string, endpointType?: LLMEndpointType }) {
+  logger.debug('Detected override model', { overrides })
   const userConfig = await getUserConfig()
   const endpointType = overrides?.endpointType ?? userConfig.llm.endpointType.get()
   const model = overrides?.model ?? userConfig.llm.model.get()
-  const backendConfig = endpointType === 'lm-studio'
-    ? userConfig.llm.backends.lmStudio
-    : endpointType === 'ollama'
-      ? userConfig.llm.backends.ollama
-      : undefined
-  const baseUrl = backendConfig ? backendConfig.baseUrl.get() : ''
+
+  const baseUrl = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].baseUrl.get()
   const apiKey = userConfig.llm.apiKey.get()
-  const numCtx = backendConfig ? backendConfig.numCtx.get() : 0
-  const enableNumCtx = backendConfig ? backendConfig.enableNumCtx.get() : false
+  const numCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].numCtx.get()
+  const enableNumCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].enableNumCtx.get()
   const reasoningPreference = userConfig.llm.reasoning.get()
   const reasoning = getReasoningOptionForModel(reasoningPreference, model)
   if (!model) {
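
Note on the hunk above: the three new lookups share one key selection. A standalone sketch of that mapping, with the LLMEndpointType union assumed for illustration (the helper backendKey is hypothetical, not part of this diff):

    type LLMEndpointType = 'ollama' | 'lm-studio' | 'web-llm' // union assumed

    // Map the endpoint type onto the key used in userConfig.llm.backends.
    // Anything other than 'lm-studio' (including a web-llm endpoint, if the
    // union has one) falls through to the 'ollama' backend config here.
    function backendKey(endpointType: LLMEndpointType): 'lmStudio' | 'ollama' {
      return endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'
    }
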
@@ -112,7 +110,6 @@ export async function getModel(options: {
       },
     })
     options.onLoadingModel?.({ type: 'finished' })
-    // WebLLM does not support reasoning parameter, so we do not pass it
     model = new WebLLMChatLanguageModel(
       options.model,
       engine,
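
For reference, a hypothetical call into the updated getModelUserConfig (the import path and model id are assumptions, not taken from this diff); any override field left out falls back to the stored user config:

    import { getModelUserConfig } from './models' // path assumed

    // endpointType selects which backends entry (lmStudio or ollama) supplies
    // baseUrl, numCtx and enableNumCtx; model overrides the stored default.
    const config = await getModelUserConfig({
      endpointType: 'ollama',
      model: 'llama3.2', // hypothetical model id
    })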