diff --git a/apps/desktop/src/components/settings/views/ai.tsx b/apps/desktop/src/components/settings/views/ai.tsx
index 62b74fc73..58156d2b6 100644
--- a/apps/desktop/src/components/settings/views/ai.tsx
+++ b/apps/desktop/src/components/settings/views/ai.tsx
@@ -60,6 +60,33 @@ const initialSttModels = [
     downloaded: true,
     fileName: "ggml-tiny-q8_0.bin",
   },
+  {
+    key: "QuantizedTinyEn",
+    name: "Tiny - English",
+    accuracy: 1,
+    speed: 3,
+    size: "44 MB",
+    downloaded: false,
+    fileName: "ggml-tiny.en-q8_0.bin",
+  },
+  {
+    key: "QuantizedBase",
+    name: "Base",
+    accuracy: 2,
+    speed: 2,
+    size: "82 MB",
+    downloaded: false,
+    fileName: "ggml-base-q8_0.bin",
+  },
+  {
+    key: "QuantizedBaseEn",
+    name: "Base - English",
+    accuracy: 2,
+    speed: 2,
+    size: "82 MB",
+    downloaded: false,
+    fileName: "ggml-base.en-q8_0.bin",
+  },
   {
     key: "QuantizedSmall",
     name: "Small",
@@ -69,6 +96,15 @@ const initialSttModels = [
     downloaded: false,
     fileName: "ggml-small-q8_0.bin",
   },
+  {
+    key: "QuantizedSmallEn",
+    name: "Small - English",
+    accuracy: 2,
+    speed: 2,
+    size: "264 MB",
+    downloaded: false,
+    fileName: "ggml-small.en-q8_0.bin",
+  },
   {
     key: "QuantizedLargeTurbo",
     name: "Large",
@@ -354,6 +390,67 @@ export default function LocalAI() {
     return apiBase && (apiBase.includes("localhost") || apiBase.includes("127.0.0.1"));
   };
 
+  // Query the backend for the currently selected LLM model and sync it into local state.
+  const currentLLMModel = useQuery({
+    queryKey: ["current-llm-model"],
+    queryFn: () => localLlmCommands.getCurrentModel(),
+  });
+
+  useEffect(() => {
+    if (currentLLMModel.data && !customLLMEnabled.data) {
+      setSelectedLLMModel(currentLLMModel.data);
+    }
+  }, [currentLLMModel.data, customLLMEnabled.data]);
+
+  // Query the backend for the currently selected STT model and sync it into local state.
+  const currentSTTModel = useQuery({
+    queryKey: ["current-stt-model"],
+    queryFn: () => localSttCommands.getCurrentModel(),
+  });
+
+  useEffect(() => {
+    if (currentSTTModel.data) {
+      setSelectedSTTModel(currentSTTModel.data);
+    }
+  }, [currentSTTModel.data]);
+
+  // Query the backend for the download status of each STT model and sync it into local state.
+  const sttModelDownloadStatus = useQuery({
+    queryKey: ["stt-model-download-status"],
+    queryFn: async () => {
+      const statusChecks = await Promise.all([
+        localSttCommands.isModelDownloaded("QuantizedTiny"),
+        localSttCommands.isModelDownloaded("QuantizedTinyEn"),
+        localSttCommands.isModelDownloaded("QuantizedBase"),
+        localSttCommands.isModelDownloaded("QuantizedBaseEn"),
+        localSttCommands.isModelDownloaded("QuantizedSmall"),
+        localSttCommands.isModelDownloaded("QuantizedSmallEn"),
+        localSttCommands.isModelDownloaded("QuantizedLargeTurbo"),
+      ]);
+      return {
+        "QuantizedTiny": statusChecks[0],
+        "QuantizedTinyEn": statusChecks[1],
+        "QuantizedBase": statusChecks[2],
+        "QuantizedBaseEn": statusChecks[3],
+        "QuantizedSmall": statusChecks[4],
+        "QuantizedSmallEn": statusChecks[5],
+        "QuantizedLargeTurbo": statusChecks[6],
+      } as Record<string, boolean>;
+    },
+    refetchInterval: 3000,
+  });
+
+  useEffect(() => {
+    if (sttModelDownloadStatus.data) {
+      setSttModels(prev =>
+        prev.map(model => ({
+          ...model,
+          downloaded: sttModelDownloadStatus.data[model.key] || false,
+        }))
+      );
+    }
+  }, [sttModelDownloadStatus.data]);
+
   return (
@@ -389,6 +486,7 @@ export default function LocalAI() {
                   onClick={() => {
                     if (model.downloaded) {
                       setSelectedSTTModel(model.key);
+                      localSttCommands.setCurrentModel(model.key as any);
                     }
                   }}
                 >
@@ -526,6 +624,7 @@ export default function LocalAI() {
                   onClick={() => {
                     if (model.available && model.downloaded) {
                       setSelectedLLMModel(model.key);
+                      localLlmCommands.setCurrentModel(model.key as SupportedModel);
                       setCustomLLMEnabledMutation.mutate(false);
                     }
                   }}
diff --git a/apps/desktop/src/locales/en/messages.po b/apps/desktop/src/locales/en/messages.po
index 7db2db91b..e6fd55953 100644
--- a/apps/desktop/src/locales/en/messages.po
+++ b/apps/desktop/src/locales/en/messages.po
@@ -346,12 +346,12 @@ msgstr "Annual"
 msgid "Anyone with the link can view this page"
 msgstr "Anyone with the link can view this page"
 
-#: src/components/settings/views/ai.tsx:658
+#: src/components/settings/views/ai.tsx:757
 msgid "API Base URL"
 msgstr "API Base URL"
 
 #: src/components/settings/views/integrations.tsx:197
-#: src/components/settings/views/ai.tsx:684
+#: src/components/settings/views/ai.tsx:783
 msgid "API Key"
 msgstr "API Key"
 
@@ -460,7 +460,7 @@ msgstr "Company name"
 #~ msgid "Connect"
 #~ msgstr "Connect"
 
-#: src/components/settings/views/ai.tsx:638
+#: src/components/settings/views/ai.tsx:737
 msgid "Connect to a self-hosted or third-party LLM endpoint (OpenAI API compatible)."
 msgstr "Connect to a self-hosted or third-party LLM endpoint (OpenAI API compatible)."
 
@@ -492,7 +492,7 @@ msgstr "Contacts Access"
 msgid "Continue"
 msgstr "Continue"
 
-#: src/components/settings/views/ai.tsx:770
+#: src/components/settings/views/ai.tsx:869
 msgid "Control how creative the AI enhancement should be"
 msgstr "Control how creative the AI enhancement should be"
 
@@ -517,7 +517,7 @@ msgstr "Create Note"
 msgid "Create your first template to get started"
 msgstr "Create your first template to get started"
 
-#: src/components/settings/views/ai.tsx:767
+#: src/components/settings/views/ai.tsx:866
 msgid "Creativity Level"
 msgstr "Creativity Level"
 
@@ -525,7 +525,7 @@ msgstr "Creativity Level"
 msgid "Current Plan"
 msgstr "Current Plan"
 
-#: src/components/settings/views/ai.tsx:635
+#: src/components/settings/views/ai.tsx:734
 msgid "Custom Endpoint"
 msgstr "Custom Endpoint"
 
@@ -588,7 +588,7 @@ msgstr "Enable"
 msgid "Enable Integration"
 msgstr "Enable Integration"
 
-#: src/components/settings/views/ai.tsx:509
+#: src/components/settings/views/ai.tsx:607
 msgid "Enhancing"
 msgstr "Enhancing"
 
@@ -600,11 +600,11 @@ msgstr "Enter a section title"
 #~ msgid "Enter model name (e.g., gpt-4, llama3.2:3b)"
 #~ msgstr "Enter model name (e.g., gpt-4, llama3.2:3b)"
 
-#: src/components/settings/views/ai.tsx:687
+#: src/components/settings/views/ai.tsx:786
 msgid "Enter the API key for your custom LLM endpoint"
 msgstr "Enter the API key for your custom LLM endpoint"
 
-#: src/components/settings/views/ai.tsx:661
+#: src/components/settings/views/ai.tsx:760
 msgid "Enter the base URL for your custom LLM endpoint"
 msgstr "Enter the base URL for your custom LLM endpoint"
 
@@ -767,7 +767,7 @@ msgstr "LinkedIn username"
 msgid "Live summary of the meeting"
 msgstr "Live summary of the meeting"
 
-#: src/components/settings/views/ai.tsx:721
+#: src/components/settings/views/ai.tsx:820
 msgid "Loading available models..."
 msgstr "Loading available models..."
@@ -813,7 +813,7 @@ msgstr "Members"
 msgid "Microphone Access"
 msgstr "Microphone Access"
 
-#: src/components/settings/views/ai.tsx:709
+#: src/components/settings/views/ai.tsx:808
 msgid "Model Name"
 msgstr "Model Name"
 
@@ -955,7 +955,7 @@ msgstr "Pause"
 msgid "people"
 msgstr "people"
 
-#: src/components/settings/views/ai.tsx:371
+#: src/components/settings/views/ai.tsx:468
 msgid "Performance difference between languages"
 msgstr "Performance difference between languages"
 
@@ -1045,7 +1045,7 @@ msgstr "Search..."
 msgid "Sections"
 msgstr "Sections"
 
-#: src/components/settings/views/ai.tsx:712
+#: src/components/settings/views/ai.tsx:811
 msgid "Select a model from the dropdown (if available) or manually enter the model name required by your endpoint."
 msgstr "Select a model from the dropdown (if available) or manually enter the model name required by your endpoint."
 
@@ -1187,7 +1187,7 @@ msgstr "Toggle left sidebar"
 msgid "Toggle widget panel"
 msgstr "Toggle widget panel"
 
-#: src/components/settings/views/ai.tsx:362
+#: src/components/settings/views/ai.tsx:459
 msgid "Transcribing"
 msgstr "Transcribing"
diff --git a/apps/desktop/src/locales/ko/messages.po b/apps/desktop/src/locales/ko/messages.po
index 4ebfaec1b..8048c7891 100644
--- a/apps/desktop/src/locales/ko/messages.po
+++ b/apps/desktop/src/locales/ko/messages.po
@@ -346,12 +346,12 @@ msgstr ""
 msgid "Anyone with the link can view this page"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:658
+#: src/components/settings/views/ai.tsx:757
 msgid "API Base URL"
 msgstr ""
 
 #: src/components/settings/views/integrations.tsx:197
-#: src/components/settings/views/ai.tsx:684
+#: src/components/settings/views/ai.tsx:783
 msgid "API Key"
 msgstr ""
 
@@ -460,7 +460,7 @@ msgstr ""
 #~ msgid "Connect"
 #~ msgstr ""
 
-#: src/components/settings/views/ai.tsx:638
+#: src/components/settings/views/ai.tsx:737
 msgid "Connect to a self-hosted or third-party LLM endpoint (OpenAI API compatible)."
 msgstr ""
 
@@ -492,7 +492,7 @@ msgstr ""
 msgid "Continue"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:770
+#: src/components/settings/views/ai.tsx:869
 msgid "Control how creative the AI enhancement should be"
 msgstr ""
 
@@ -517,7 +517,7 @@ msgstr ""
 msgid "Create your first template to get started"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:767
+#: src/components/settings/views/ai.tsx:866
 msgid "Creativity Level"
 msgstr ""
 
@@ -525,7 +525,7 @@ msgstr ""
 msgid "Current Plan"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:635
+#: src/components/settings/views/ai.tsx:734
 msgid "Custom Endpoint"
 msgstr ""
 
@@ -588,7 +588,7 @@ msgstr ""
 msgid "Enable Integration"
 msgstr "Enable Integration"
 
-#: src/components/settings/views/ai.tsx:509
+#: src/components/settings/views/ai.tsx:607
 msgid "Enhancing"
 msgstr "Enhancing"
 
@@ -600,11 +600,11 @@ msgstr ""
 #~ msgid "Enter model name (e.g., gpt-4, llama3.2:3b)"
 #~ msgstr ""
 
-#: src/components/settings/views/ai.tsx:687
+#: src/components/settings/views/ai.tsx:786
 msgid "Enter the API key for your custom LLM endpoint"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:661
+#: src/components/settings/views/ai.tsx:760
 msgid "Enter the base URL for your custom LLM endpoint"
 msgstr ""
 
@@ -767,7 +767,7 @@ msgstr ""
 msgid "Live summary of the meeting"
 msgstr ""
 
-#: src/components/settings/views/ai.tsx:721
+#: src/components/settings/views/ai.tsx:820
 msgid "Loading available models..."
msgstr "" @@ -813,7 +813,7 @@ msgstr "" msgid "Microphone Access" msgstr "" -#: src/components/settings/views/ai.tsx:709 +#: src/components/settings/views/ai.tsx:808 msgid "Model Name" msgstr "" @@ -955,7 +955,7 @@ msgstr "" msgid "people" msgstr "" -#: src/components/settings/views/ai.tsx:371 +#: src/components/settings/views/ai.tsx:468 msgid "Performance difference between languages" msgstr "" @@ -1045,7 +1045,7 @@ msgstr "" msgid "Sections" msgstr "" -#: src/components/settings/views/ai.tsx:712 +#: src/components/settings/views/ai.tsx:811 msgid "Select a model from the dropdown (if available) or manually enter the model name required by your endpoint." msgstr "" @@ -1187,7 +1187,7 @@ msgstr "" msgid "Toggle widget panel" msgstr "" -#: src/components/settings/views/ai.tsx:362 +#: src/components/settings/views/ai.tsx:459 msgid "Transcribing" msgstr ""