Skip to content

Commit e6e4771

Browse files
committed
fix(ThinkingMode): enhance model thinking capability checks in getModel function and ThinkingModeSwitch display
1 parent d045449 commit e6e4771

File tree

2 files changed

+47
-7
lines changed

2 files changed

+47
-7
lines changed

entrypoints/sidepanel/components/Chat/index.vue

Lines changed: 37 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -95,10 +95,10 @@
9595
triggerStyle="ghost"
9696
/>
9797
<div
98-
v-if="isThinkingToggleable"
98+
v-if="isThinkingToggleable && isModelSupportsThinking"
9999
class="h-4 w-px bg-[#E5E7EB]"
100100
/>
101-
<ThinkingModeSwitch v-if="isThinkingToggleable" />
101+
<ThinkingModeSwitch v-if="isThinkingToggleable && isModelSupportsThinking" />
102102
</div>
103103
<div
104104
ref="sendButtonContainerRef"
@@ -128,7 +128,7 @@
128128

129129
<script setup lang="ts">
130130
import { useElementBounding } from '@vueuse/core'
131-
import { computed, onBeforeUnmount, onMounted, ref } from 'vue'
131+
import { computed, onBeforeUnmount, onMounted, ref, toRefs, watch } from 'vue'
132132
133133
import IconSendFill from '@/assets/icons/send-fill.svg?component'
134134
import IconStop from '@/assets/icons/stop.svg?component'
@@ -140,6 +140,7 @@ import Button from '@/components/ui/Button.vue'
140140
import { FileGetter } from '@/utils/file'
141141
import { useI18n } from '@/utils/i18n'
142142
import { isToggleableThinkingModel } from '@/utils/llm/thinking-models'
143+
import { useOllamaStatusStore } from '@/utils/pinia-store/store'
143144
import { setSidepanelStatus } from '@/utils/sidepanel-status'
144145
import { getUserConfig } from '@/utils/user-config'
145146
import { classNames } from '@/utils/vue/utils'
@@ -161,6 +162,8 @@ import ThinkingModeSwitch from './ThinkingModeSwitch.vue'
161162
const inputContainerRef = ref<HTMLDivElement>()
162163
const sendButtonContainerRef = ref<HTMLDivElement>()
163164
const { height: inputContainerHeight } = useElementBounding(inputContainerRef)
165+
const { modelList: ollamaModelList } = toRefs(useOllamaStatusStore())
166+
const { updateModelList: updateOllamaModelList } = useOllamaStatusStore()
164167
165168
const { t } = useI18n()
166169
const userInput = ref('')
@@ -172,10 +175,17 @@ defineExpose({
172175
attachmentSelectorRef,
173176
})
174177
178+
const updateModelList = async () => {
179+
if (endpointType.value === 'ollama') {
180+
await updateOllamaModelList()
181+
}
182+
}
183+
175184
const chat = await Chat.getInstance()
176185
const userConfig = await getUserConfig()
177186
const contextAttachmentStorage = chat.contextAttachmentStorage
178187
const currentModel = userConfig.llm.model.toRef()
188+
const endpointType = userConfig.llm.endpointType.toRef()
179189
180190
initChatSideEffects()
181191
@@ -192,6 +202,23 @@ const actionEventHandler = Chat.createActionEventHandler((actionEvent) => {
192202
}
193203
})
194204
205+
const modelList = computed(() => {
206+
if (endpointType.value === 'ollama') {
207+
return ollamaModelList.value
208+
}
209+
return []
210+
})
211+
212+
// Check if current model supports thinking
213+
const isModelSupportsThinking = computed(() => {
214+
if (endpointType.value !== 'ollama') return false
215+
if (!currentModel.value) return false
216+
if (!modelList.value || !Array.isArray(modelList.value)) return false
217+
218+
const model = modelList.value.find((m) => m.model === currentModel.value)
219+
return model?.supportsThinking ?? false
220+
})
221+
195222
const allowAsk = computed(() => {
196223
return !chat.isAnswering() && userInput.value.trim().length > 0
197224
})
@@ -235,9 +262,15 @@ const ask = async () => {
235262
userInput.value = ''
236263
}
237264
238-
onMounted(() => {
265+
// Watch for model list updates to refresh thinking capabilities (following ModelSelector pattern)
266+
watch([endpointType, currentModel], async () => {
267+
await updateModelList()
268+
})
269+
270+
onMounted(async () => {
239271
scrollContainerRef.value?.snapToBottom()
240272
setSidepanelStatus({ loaded: true })
273+
updateModelList()
241274
})
242275
243276
onBeforeUnmount(() => {

utils/llm/models.ts

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import { getUserConfig } from '@/utils/user-config'
55
import { ModelNotFoundError } from '../error'
66
import { makeCustomFetch } from '../fetch'
77
import { middlewares } from './middlewares'
8+
import { getLocalModelList } from './ollama'
89
import { createOllama } from './providers/ollama'
910
import { WebLLMChatLanguageModel } from './providers/web-llm/openai-compatible-chat-language-model'
1011
import { isToggleableThinkingModel } from './thinking-models'
@@ -44,6 +45,7 @@ export async function getModel(options: {
4445
onLoadingModel?: (prg: ModelLoadingProgressEvent) => void
4546
}) {
4647
const userConfig = await getUserConfig()
48+
const modelList = await getLocalModelList()
4749
let model: LanguageModelV1
4850
const endpointType = userConfig.llm.endpointType.get()
4951
if (endpointType === 'ollama') {
@@ -53,13 +55,18 @@ export async function getModel(options: {
5355
if (options.autoThinking) return body
5456
if (typeof body !== 'string') return body
5557

56-
// add additional check to avoid errors, eg gamma3 does not support think argument
57-
const _isToggleableThinkingModel = isToggleableThinkingModel(options.model)
58+
// Models have different thinking capabilities
59+
// Edge Case: Qwen3 Instruct does not support the think argument even though it is toggleable
60+
// add additional check to avoid api error
61+
const currentModel = userConfig.llm.model.get()
62+
63+
const supportsToggleThinking = isToggleableThinkingModel(options.model)
64+
const supportsThinking = modelList.models.find((m) => m.model === currentModel)?.supportsThinking
5865

5966
const parsedBody = JSON.parse(body)
6067
return JSON.stringify({
6168
...parsedBody,
62-
think: _isToggleableThinkingModel ? options.reasoning : undefined,
69+
think: supportsThinking && supportsToggleThinking ? options.reasoning : undefined,
6370
})
6471
},
6572
})

0 commit comments

Comments (0)