From 0f318d995ab8c27349d4bb0757433dbd712f48af Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Thu, 4 Sep 2025 18:35:31 +0800 Subject: [PATCH 01/59] feat(lm-studio): initial support for LM Studio --- assets/icons/delete.svg | 14 +- assets/icons/logo-lm-studio.png | Bin 0 -> 21442 bytes assets/icons/unload.svg | 5 + components/ModelSelector.vue | 170 +++++----- components/Selector.vue | 8 +- .../WritingTools/SuggestionCard.vue | 31 +- .../content/composables/useTranslator.ts | 22 +- entrypoints/settings/components/Block.vue | 40 ++- .../components/CollapsibleSection.vue | 45 +++ .../components/DebugSettings/index.vue | 34 +- .../Blocks/LMStudioConfiguration.vue | 309 +++++++++++++++++ .../ModelManagement}/Card.vue | 62 +++- .../Blocks/ModelManagement/LMStudio.vue | 84 +++++ .../Blocks/ModelManagement/Ollama.vue | 123 +++++++ .../Blocks/OllamaConfiguration.vue | 316 ++++++++++++++++++ .../GeneralSettings/RunningModels/index.vue | 72 ---- .../components/GeneralSettings/index.vue | 279 +--------------- .../components/OllamaDownloadModal.vue | 14 +- .../components/AttachmentSelector.vue | 6 +- .../components/Chat/ThinkingModeSwitch.vue | 8 +- .../sidepanel/components/Chat/index.vue | 3 +- ...loadModal.vue => BackendDownloadModal.vue} | 90 +++-- ...nloader.vue => BackendModelDownloader.vue} | 42 ++- .../BackendSelectionTutorialCard.vue | 206 ++++++++++++ .../Onboarding/OllamaTutorialCard.vue | 138 -------- .../sidepanel/components/Onboarding/index.vue | 35 +- entrypoints/sidepanel/utils/agent/index.ts | 17 +- entrypoints/sidepanel/utils/chat/chat.ts | 16 +- .../sidepanel/utils/chat/tool-calls/index.ts | 4 +- entrypoints/sidepanel/utils/llm.ts | 13 + locales/de.json | 64 ++-- locales/en.json | 62 ++-- locales/es.json | 64 ++-- locales/fr.json | 64 ++-- locales/id.json | 64 ++-- locales/ja.json | 64 ++-- locales/ko.json | 54 ++- locales/pt.json | 64 ++-- locales/ru.json | 64 ++-- locales/th.json | 64 ++-- locales/vi.json | 64 ++-- locales/zh-CN.json | 64 ++-- 
locales/zh-TW.json | 64 ++-- package.json | 12 +- pnpm-lock.yaml | 75 ++++- types/lm-studio-models.ts | 3 + types/scroll-targets.ts | 2 +- utils/constants.ts | 4 + utils/error.ts | 85 ++--- utils/llm/lm-studio.ts | 139 ++++++++ utils/llm/middlewares.test.ts | 116 +++++++ utils/llm/middlewares.ts | 224 ++++++++++++- utils/llm/models.ts | 24 +- utils/llm/ollama.ts | 19 +- utils/llm/predefined-models.ts | 49 ++- .../lm-studio/chat-language-model.ts | 196 +++++++++++ .../llm/providers/lm-studio/chat-settings.ts | 18 + .../convert-to-lm-studio-messages.ts | 59 ++++ .../providers/lm-studio/map-stop-reason.ts | 17 + .../openai-compatible-prepare-tools.ts | 88 +++++ utils/llm/thinking-models.ts | 5 +- utils/pinia-store/store.ts | 170 ++++++++-- utils/rpc/background-fns.ts | 117 ++++--- utils/rpc/content-main-world-fns.ts | 11 +- utils/scroll-targets.ts | 3 +- utils/user-config/helpers.test.ts | 4 +- utils/user-config/index.ts | 26 +- utils/web-request.ts | 12 +- wxt.config.ts | 16 +- 69 files changed, 3328 insertions(+), 1192 deletions(-) create mode 100644 assets/icons/logo-lm-studio.png create mode 100644 assets/icons/unload.svg create mode 100644 entrypoints/settings/components/CollapsibleSection.vue create mode 100644 entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue rename entrypoints/settings/components/GeneralSettings/{RunningModels => Blocks/ModelManagement}/Card.vue (63%) create mode 100644 entrypoints/settings/components/GeneralSettings/Blocks/ModelManagement/LMStudio.vue create mode 100644 entrypoints/settings/components/GeneralSettings/Blocks/ModelManagement/Ollama.vue create mode 100644 entrypoints/settings/components/GeneralSettings/Blocks/OllamaConfiguration.vue delete mode 100644 entrypoints/settings/components/GeneralSettings/RunningModels/index.vue rename entrypoints/sidepanel/components/Onboarding/{OllamaDownloadModal.vue => BackendDownloadModal.vue} (58%) rename 
entrypoints/sidepanel/components/Onboarding/{OllamaModelDownloader.vue => BackendModelDownloader.vue} (76%) create mode 100644 entrypoints/sidepanel/components/Onboarding/BackendSelectionTutorialCard.vue delete mode 100644 entrypoints/sidepanel/components/Onboarding/OllamaTutorialCard.vue create mode 100644 types/lm-studio-models.ts create mode 100644 utils/llm/lm-studio.ts create mode 100644 utils/llm/middlewares.test.ts create mode 100644 utils/llm/providers/lm-studio/chat-language-model.ts create mode 100644 utils/llm/providers/lm-studio/chat-settings.ts create mode 100644 utils/llm/providers/lm-studio/convert-to-lm-studio-messages.ts create mode 100644 utils/llm/providers/lm-studio/map-stop-reason.ts create mode 100644 utils/llm/providers/lm-studio/openai-compatible-prepare-tools.ts diff --git a/assets/icons/delete.svg b/assets/icons/delete.svg index 188293af..8e2fc4d0 100644 --- a/assets/icons/delete.svg +++ b/assets/icons/delete.svg @@ -1,5 +1,11 @@ - - + + + + + + \ No newline at end of file diff --git a/assets/icons/logo-lm-studio.png b/assets/icons/logo-lm-studio.png new file mode 100644 index 0000000000000000000000000000000000000000..2749b4c67b9f57c815fd1ad368f103c2f34bca1a GIT binary patch literal 21442 zcmV)_K!3l9P)Px#L}ge>W=%~1DgXcg2mk?xX#fNO00031000^Q000001E2u_0{{R30RRC20H6W@ z1ONa40RR91$e;rN1ONa40RR91$N&HU0GnGql>h)h07*naRCoc+z4^0d*L5BCdPHNM z2x1T^a}p_tGA&Dr!^n~>(UK|IrdTD}E~TPGvXhFPN;yuY{B!;kC%+`lP>yZME+?kT zl+0L;OM^vm0!V=rKoDaCXrKY~+%J3Wv)9^t-+S)&hHkoifp^b7Yp=ETKIh)|z1QFS zzV}Z2?f>zuy~VL#=7@csrvZ?}(OH=63kj*u1;%Uw?kXfX+d&j(l(#2<64hcFQV~tX-bkd-S4=7V!DkZ20%$UflBt3_@k~DWxXNFS;qZNYm zY4X!Btq0k_5GOk_9So_1%1|3gu2N?1~=9rZWMN)pk?Litauh?7HVKDsCMDKEejb~cEs0BX{jnsqRC zBrUO7BHFB zX;R24!aXWX3t8Hajk`)|1S=nABj>eLrGSM87j0qS)z{QE-Igaq9QNXHN%X=3~e7QJMq*dKs(BdDFoRvQK(=l zFW~lFl@<|U@Gvt2@Wc$v&Rx2^s|>`k>6!8{-{*}rEtY24*4hG6$?2;$6dbD8ZTaXW zFU>YH3b{bl(W08PNyZsfLeevz(O~+#*eYM#t$~0m*qJ1-V`Wop4HM<)OXI}Hn9P~s zrKHPQfHquLwFA-d24cbwuF#oe`;Fz2fD1cewIfKhD9}u4?x0JScU8#ku_`uR#>;sm 
zTp&^bb5*s*l<@BB=~LJdt_LZQ(u6jHzw9-$B@>_zBJ2X)d&!K& zj=Ef0R6&FBF|-mll@<{ukdVpIu{t4FOb3Edh-@i5YX*ugAEq;GG`zA0;NZ&F&4*t< zS0?spB?qtqWhZ$A&i0sJ3LzIuy0U@_4l=(GaeXq=GC*2Ij!If|2~ zHp*Qk8boE1VUjanJyorOl6NrR4!S{EkF@BhOc-u_1uXrOnes$NHTM-*3kmdKd8MLA zRmp>LlOcF)xcxaUV(6sXXZ2N66pEi#k-u4S+WwxCM))}ddOLTqr-I~&)8R=3ydH^ zL@N-cB|!qf@ue^nHfLlxh8(>$I5hwh0vz3k6OLlR$3dKwt?@De1({VK(1L1)O?0o`QfkUKo-c^DSP*xk!+L?3(CmbcMHUyotWG09x zvI#DHB2T70ViDo!0!}!RrHuiLV2|teujEo86WO{+0-d;)FA?|9&dxkba$0B#S{tf_ zPVB&3m;!(~$uJ{m&gAa1;$d13BS1vax=FwUUE4MRvMZ08>pS zzsFK8DY8Q;KRhP_0!}#2YKY||okAjIR)9eLlp@Y(Cy2GP$?Tp|RJg<$Dv~FW-jE!% zG)G9DwN(1tP0Lj*%qeR-li;K$CQGV&V&)_<5pcqh@;;B*CK%si6&k4=S;#kAAgf_4 zV6Ivq0#1A^faY!&tdb9Dr_s-Op@Ci@11BJZ9Kj4K=oDufSJVJ72vbny#lFc!-rl@s zj^fmCbOI+Fo2r2^kqw!Si&<}|0)jja(Icz5rwf>c1bWb)ZqWV(NgL|(-C(^UOJ;VX zOjv6k0ftqtW342u(;Frc^2!MOFn3k) zaffQdxOr>BESp%#kw_3xND>sKGM6Ccd#8&wLfSQ7wddI-li9Mf=3T5u3}6MK3ni+- zCe!GZS3E=FmXZrp;1zSGh|6G;TZOEmG^idEVG2+P6Mmqvh1Q|Lh>&(ZRiRde1vu6M zQb6Ab=^th-%ZZOT1)#ah3z&rpNLBu2_zXY3X!W0X^`$dVkL%n8Tp)Ed!5 z%qZjY0;Yp9k=49b1}JYfoGSp8PdB*C=ZtckSXAb&Uh{s7KH<9Et7-e8z05}t@%ACpDiex2~2Qiz;CxxV4REK%B>&2>S z&145=kVJuUVujh$4TY_RxET#KBJc?j)@-0yAd4B&U?v>Vn(5-?>4G&T8!li<0M=RT z1~tkxUJ9%tC(S3pFvQXT^21?iBQe0z5p9qVS%s>Xn51zG&m5BJu49D_^a&A+{1^ha z8X$7>LX;25XgQy5C>HMB*^z}gLuYYPM==1B)j%d(rM(Nb50wsr&pWsh>v9ywlpF-Q zU7Vy+fTlQ+lMrN!-f;jGEO~`;O6JtFddU-ke@&2G1Y~eDa}VsTs*g!piB!T zXg-{99DT}@FvsQ@eIQrzYA(UD^a@55mxn?1k|#lquB(&6%ndDI6e2=k5v@AYiMWR!~eFaJ#1Rp9ZYG`w7Qw3C*K1*geRdaPkPXbcw@=J zIEodN)mayzpJW};Rcs*2p`}h)nxD zCyH8t2|vOhTM-kkoNxqt4OTBP3n;~6y-~X^smW1HlIrJegqnovqSd5alFl1XH_)4- zTh|HV0K-y0Ka*#b&kb}BIkQ>TO=26?58L$F58Ue`03 zAp=ms71IGSIHIq@EHFckZjK2*AZ?SvRG2ME#vFk=iU=OVXu7 zSe1u{G@rBuc@k8pfxPqu>=M@Gx~eCc5Hrp|3J;V|f*=I-Ljp25g2#^22Xr#j2>ogj z@hC((0nbFzsFqsdpv2jkwHok{`BEig-#dc6NN03+A*BIGasMg%REl@;-L@S_UWN>r|Cn^LW z!DxwK^-ekIAeW`fX3dn}kVF-idt6$F48wv5*FTyPAjm; zLzm{-&X9*#JOx%;aS1EP4$5LB>GHvx-&J)xvQS2)Vz^^ap)N10B7~6BT%<6?st$hCtE@axQXZ 
zu0bN~(dPuKCkgHk!h{(uORm7kj3-Ywu;o#XM4?_G20@H$p~qrI;DbPW*$kD-L|=)UMvca#RRh^d2kC)MDI3*eUcn$QnGP2) zC5U2HLB$LpW}HEaOLw^ivbF%?Fi0Oo%8h_H$9yDoB;f5rQ7h$H+@f)$lW3MeoS6bN z5DqJ512N&Dy>kzu90`uqI7Y`YuI+1U9r-gyf{lbJtzvWSmwi$vxoZN}BF6H=Y8ptE?iT>HEyjUwT&djZS;O_|2AKB_ ze74c6T3NQm#dLzRaH%(i;E7ozfcu~{ALS|C*!(J`2jP+mdu%#tWa zFGPlek-<>s&WT)ID`;xuQ`fJ)>On{-Xy@n0E3fbj+EtY-o*cL6e?xlIGm>;bV*tM zqK{r>B)Ke>TZ~a*qmYs18Nrf?FNN8W*o!hgWh>nxm;h59(f$WNaOOvU_>K>M_{_OG zP8Q1~7H}i%+>@vFPM+L5fy4PdyJ2xeI6WguVC7_wQWsvo@%67=dFm^dZ`|la%!p`< zxrymS3fTL5SFhZ>_~wn*&tJcGmA6%^3{-}|*4T+BPVU)b9&aeubi$U+ z(eU&{l(U0e-g@)KZ~cplufBXeh}v?u2%o8?YLwvS{{Aa3UVH7;YuSNk54=*PLI|>P z<#d!mDw{VPwYMR#F;1FcQy+^?4-^N zPk|Avn1!Iw{W>#d6VN*;ca>3`1$-Us?fX|KCHgwXY<^dTFO(!?fxq%g_dN2R`0lM* z9w9C*;j!ve`<^OSasQDW$p)xkvOe3RxLgvfO_U zK79J&M|>OM<7`R5j1Og4WX&`L{~N(`W9IUavCwCJa!T+N){1g6(Yi4n{CnpDw@^mV`>(_mCM5lvm4MtW(bnGH$&z}5` z|LonctQOqFQEGWs=_(=Vy^o(ceWqNGfd|u-RgUZmToPzKn3{Qz7k=PX%b2UN`RuL= zpS}7$wPCKNMJv4or%VI)|C3Li8(kge41=b#hpYB|RZHHcNmk70*w)|s!L#;d(d4m| zsii;#QLoSU_fDQT@zBHZpw(M)CQs#JFE?253hJ-^4+CvYXeEiYH7eP8kJKMvIX#WPxV@AmA@uKuZWFH9}%g{az z{=|pQh6y&Y`&FQ3rc8+U?|Yy>i4B!2{jzXoqvqZ>00(UVi6F)mGaw9cMBoEK^C7HA z5<`%T55DF-kfAGimKh^m!H!tICkFJ;gZO0SArgTd;|ABhjm5E%-Qjm|s(E74fP?j$ zjPZz7_K3_{K(PX{G>)rx_ADNO#1hEYX1ms9ZARHc@63WEHx&xOlom zI?Iknmkwjr5ds6xGU1f>@2R}6A{QO(9yYjuv3a}2$I7N>Li<2cdwOC?HiKuhn26CdC{*S0)RlblwI2Rpl8Z8y;uQr>k63FLKnH{RLlwstjiI>gEfWUW0lT5%+DFN((i0JSr9RTmtVSmZTyI% zHnAg?cEuT){hilnB|03CMJ>wxKb-g=*8birFI{US6d_OpRMy-HK8vkq#(rw#w8$9` zQDhT@zJR%^0+JBSxS~O5a=7`?d?rjlwZae?6i+v_yiJ0upyX#xu=D-6c76Y;r?0sC zAx^5&xwU@&?5uN+Ijh#b$c+|`X>XQSuH1b7*{i9q1!|mIOs;1aca`Pddcz*C{0M?3 zZB9Rc?QQ_wU7bE=0SWOrQp1IK`p{hzc?MU5X*|Xw76K(81W{vPIl_;0b%dgz{{O2!~ z603rnSIw+^#>gP&mm^>eovG(Yir%%fNLxlrraL##9<(l8 zxbfLvxW|6L=RW~XHM_qUt7mJT18A6a6HeI>g!*-^shgEF}`-!0*a@V zv2)H4&7H4ZyZPKRSMcuuC97Oyf*f5O*>X|Dx3&9i5$&&;AdF;@8_+?`qF&5J!_y6p zDbXzwj%Zgg)S%!?U%K+*3)g@4XYcyxN6)q!f&_2q2&Ze;_fDPKKXr^J;83)g@5-@Nn3&+wbeT1T;hYKZMzCiZXIOL4Bh{NlA6_9fMmIPs&=B~=hiA1dfY 
zm(_xhEdW@7kan=pJQrCI$6=1E9~TshyuCCn{0{nz5XZr`hb((8e%z_$C0RjAZbr2|qBbs{Y4Q~FrLz*Qvi8lNJ))d{ z<@!r6T-~?tu&uemnLRMLTj3br{tKOdH))1%zjXt*lpQ)rq9C%tNH`++=HXQ^?VuAX zBtnsAK0p8oPU2BbUkYoLt__!EF^3D7^&0rrTQ`6I_w7@;f$S_mRfr~Ho-Kj50+HsU zcF7lJlVHLRIg`QBEji(sG9noH0WabVJl8-@I2tn>Ens>%h*M;f&WcI^z){!(An5me z&^;q5K}vm2Fil#!6(%*u=(GG>54p(edS$OvR#(!ca|bfh#M??CizG}5j1+r-Q!y%8 z@v3MkM^Vb%D!YJe92}j?2}h^oBqDkut0aQ@At@Ogjg^fS&;_9ixDP=n5{0Vl0i8Tu zt05sziZL>t1O~0MGq4t#d?}R4#Lja#)tyDNr9wmmbxE9XKX}g7ESAZZNWaShkUsV{ew8E;tYjo8plY zZMrtsuR{{-oA;cw?+S<)3$Nt#)R4C7wYP;?is0G5A?b25!DF03i84REs~il2=EDg` zZ3((??@9jTQG%z<;+SKC>mJ=>KO7gkUSw2iM{Kf8|AK2>ZA>3g`*Ea z3{(vS2lb_JeOGy&N(n~GQ6PrKESESJFp4?7tT)XIGe~uaUI7(%?%Wb|Od|L)s2u3R2vW`^ERbfIQsSh_7&)C==uUkVFd7PWh)mf|s|LuQ(h ztQ5Fsy-W+Ev3{qgXW5iA%I@~}?ehP~pSkO&{`@@;w3mWqcZ`lO+N*)vb8jXs#gAZg z4C-c~eS3S^Gf!RmoiAK`=WYMVJeba2N;+7xj4|%-z5CAfSH5%Ap55qehCH~Gv@IZ5 z?;UPY$WQ#Ozx_)1RwpV;=jCF@io#;ZcivUOtQA~UJkx9mf?3?HCHm~2z2n3$|H^|O z{r>fv%);J>Bb+90GDGzvINTC-vrs1NiLQU~>uwJe$w1kJVmVU*ZSYZc)DKHYl(Arp8UCAe8}EpvC0glHX8XnnuozY_tSeG zJNwxC&#H)B%HXCXBj#+JfVEY=9!qB8TTo;5xMnJqf}-`WwGxqA8uOf;00pneu~4GQ z`|5x4Q}=x6Be(6b${O=G#InILvT{dn_skc+VLIvH^qCW%`pg6NZnG|NW~D1;U?amC z?Y-x{XYa)uK(xQgx!d_a0T{6pcPjFEzHbAY|ekjghL$1wJ-tRMe zr}a<#)ICbJK%NafU}E3?FW(MyET03)94)=s!F~6ie)3P53I3TU+TD!U>Ps zn_~r6iDCv|F;2@0dVajn&xlL5%>l_mK>_^|ySa0tjF>b4AP{m+GzzKA;+Y|tAOFOi zcige~lpUQ!7e^iJ&DQN;Zw>rzOg0N`CPo(@{XyMWewjqKpvLUkG0#o1`*+-V^3J>Y z`{37FV(A~0Cq zRk5bLB}^CG{l#1CX(&S9kA(Ta;Z3kGTxo1UxlqF<<)qmtv9#`~!eZc2i{$;i`yZ%Z z#GTEgq=ygM3%zc^=kOK5+$>~?&74ZuX98}8>a(G9FVD$Uffw@J-kGyyOGRLS>%cgJ z4l<9i{7A>)P?IvX(DltLoy%(tP#)IU1v_Ydz++We$hy$-wea*D6^?pNbw)?^I3j|vf@lG(hH2KwbFhJ`u1QcKTpr6q4t6|ww$_Hxbj_8%It*KHn}dke&}nw87Z8hGjXEBSgrNg3prTbUa%DKKS$73R$m z)_BD7tSFushQ)$HOU#P7M7l1xGA)UI4*vaM1`>6Cj7Qug=0^aq!I`(M2sOFH&Y z>e!12T8GE>TnkH;RdQEuMFw#3&Fla2H{QT*45 zBsb8bfSI*_SHAe{l{f5{o!UXLMhRvXdHNJop+FJ+hAU zM}lHMve~H3dGl2H0GEZF`T5UX{Mwf<+wW-nfge0~wA(A&h%UVN{(^g>;p)}PdJzt^ 
z^Ur_YNKWt)HOeY^`A+=J|K=qSYfbX<5@%GHPQ!_OL>VVO?^T{vR6uk!4hO?n{xjy8 zvnT9li1BbErkQjyGS#j!Q8D_pN3E0Ncb^u4=W%k2coSJQ16~a`<8E3ly2R}4SIJ*` zXZ&gqb*`Z94*T-4w1Jl7YXAPVU)-{JZz+g&lwRtaDVl|Zi>opW z7Udk&QcHkZY#fDyIP(OR=`xmHcgwBN)hqUYse+31tcwHYIFqPHPzsjo%C zqFx0tXq!T?HJzKHHP@hMN~n$GHbG)vv!d5+W@Oa+iU~lVf(oii2eGK3lu7|Jq70_G zV8tqs*#um+5R|PewJ7V|bXPFUR1MXvKoAD8L_(l<69$87H&(Wl`bugRg(7?vw7Y85 z8Rx1_m7+>Tj6_XJUcl0Qkr!ri3z*b37y=(8&N8K?6%)V;L@j|6p9%#NP)NMfBKP?Q z*d$t5tGq>;B-2LO^>9WyLJ1!c<7$eT=#ioN( zrh~0e(A+xXygnHz%dG|=5HTj>OQ8kI^|m}SvY}0j!X$X8UehyWhJEnEXFvAj9gjYC z=8ki-f7wY@0~%CJE5TC+_GU0WCp9N`Y0k>(65Nhx9wVnA*x$F`^!M5;*Pnal@{9J) zh0Lz1=*yBg3e1bw(tfac?aIw}F5b9!;d=kYNnscwX%)bPt5^q?E1*Kvr7}lwl9wIE zjm3tWFVz4;n-+r7S9?MsN2*xGaZUF3-uJy{e&HAIf8u@R?^#eni=&P9I%9i9DDJ6S z^EpD*U4=6DW1qV3)fcaR;d5`Ee|hno2 zD?V~!&k5iE(3vx5_Ac7n17*PMmS^5o8%)A-gi{>6d;yGsAUKZbZ|M{ZIt)tF4_!c> zcO6c$RyG$3X2SB{-}~f`-}N&;cdy+C!e+co9vIm>ZQ}X&NxcKr$0F=1G|$<3|NLhj zeD4!_gf`lAgT@s=SItNh*V!kKJ#;;8Z{Zq;vU5+nsmS1nYnQRY28KBY(CtGi;zGs? z{kp|qd$ACvzB7kiZ%ZXqBlC#pwZC`wJtzOdU%W5*7BShtLr?HdoAD~+Tk|<|!QBn- zYQmn=|H)53;NSl_O{>&|=1qrSORpNR9O%504!%4=5ofhUak!RE>} z9gvl=8%t*7Ek^ZMnFO1dpnnEvvas{1CqM9^v-bBqGQCrgwC$+;1ZJCb8#B8a-jy3I z*sbvX5Bd8dr(A+IX2_^40uVk<>^<@r{!myP3J4fWH$c)XnlV#UBWL1NB{C&A%IHg> zvm=Ka(?ZP1Mx>YlAjTySR6NaZ)xnb=FK;(3qPmvD$NppE!_#k>xvSw_xmx%G-*+ze z%@WcnNOs@qAQayI-aYr9vQ6+SMchUYvHQfbibVws0ES+$S>wFZ0L7`^3GXM8^*BWyQ>k|FNwO7VfIDE4Ofd_`Uizt8`ur z80wN#10?Mu27AmpapI11<&h$}Q+cbHg`^@Ro(cse?nLDQ>;Cnp8>)q31=hJ@27nls zKoE-MWZi)y_G<{dm+12r2mgNM7CjCj&s9yy2c#F8v-%tC(lOv@in5h~P6v3Os_(le zOWR*BNr@G%3wgipgHsp|kQ0q6<48X9J{CZli*cG110-Gsii$|Z+)1V*@VAB3?>86u zqfKk6qbY9>yDb0EhViHBB~_3Pv#rkT;$rXJ9505Uj~+j&0x=2oN>(uV+*8mcLCguq z;E%z-bm&WAxm%1Kg0d<~k&&GOW-(4gRbuJzcl_cSFnww*~XXZ#%JxZN?!v4J4u05?$?T)c4OhW+U^Fzsa5+M~e)W=CG05;kz~{7ONO|Hm*AD^HXeV5ixQ zWq^O=;1OUy6qH^l7-d&Xt0YwqYzDx=XTNpT|7IHH#~5YqTD&v(-}hk^k5$zE1mBBS zUKoETY-!Lvdp2erv_;$1J^v!!YZII0fA!BU zAYD8%%t#J^0dDRnR)h~PYZej` zFPjlN%W54(?`=h$Vv(ZsN%^Q684)D6zi;?2-+uMw>!10h`yY6)yoRSQXE~G4I)Psa 
zjGsdv?HxNyAI^6aBIT%~z0l?V{pS~+|JIef9%-S=UN*4u)DHu@x-UO}_56$YC87`$ zFM*)J@M7A3mrd$RG-nocgSmXVA!?9aIxR~rwq-5U)LuJtVXpN&^_9!tc>3}We&WuL zef*rg9_yUF(?+?ipj4|AYj`|SYA?jHSI*e&Wk2UR*bNruIHdI3D8V!&g96!Zvi;@N z%Qx-cfY1KX{uTLM9F;e_MD zT7;6%#S+X1sw%JRcON}S$hEGc#Pn@jC{g!~ZU{n}a!!O;67Lk<|AXIq*Z$wZbRWt% zqLwEx&cHNurdm#%av}7g9mgXTT2!;CxT}UqIE~4;u_nHN_dolUg=`xsqI&lf(?d5(Ig;h#5g= z7BCAHke$eEDsNrVXUYt)R`5;(P}kml-ZiC-#_V=bC(|MsNW|tUGw&+V;KNDOfWpnk z?{4s^ils%6JS~gEfh;(zqZlS7k%bDl(JL7oo3FqKd=MB#4+go!6_b6FpjAwn-kPdM zpt*ZhP?-+aS=F$Yarq=xOBOV6>0^53I}^@!2owz%Cmh$z$dJkJu_(~(>q?w(kUe^G zZX+&wWsM;&asqCdB@-rA3YnaF1<>=Of=Q*sS<$PQ5%{3f3JeS2A{E;O7?cgB!$YjS z#8kKf2V*9%Y6PaZ`efSj^l9K|V_Z`b0?P|c0PU!Hu}P+?fSR)Uufk$cN!@qZ%9ME! zxY29MvO`2ZOK1R|XdovXP3fEK^9qgxjWLU(Ged$wdr0f=q9jOd)m0~6^ zYbP@|a3Rl1^wC(E?s=F(rDIvw?Eo16mR6tc8;@9sl|T#ujtUB{U}wfG!QnEjBF;?_ ztrj{MB)c)gymSsEE|ZuM_@L7Y97=aYE|)821fqb2iEuFsNFR})pd{o~=h;FQrC=%V zAZNNyRmcJ)`GF!h*c=T9mNriElbBGEE|3b`Sjch?sp7&WR+#&H_ABRq^v~Q`-VEqZ z7?BFA&0|ioXPoc`8~h5P(#EFA?MTcUfwQgNe#OsguU@k^13vYoOL*ZxkbPJRZGK|% zIa6pb^fv?AFC>cJ%UH5`5dv&mJ(jwCrOH-Fr^|=Mkl-L@LJ-$UFehuNOt0eCdoDlm zU;Wy*dyVjI#b&+!N1wdw(|_sSbM}%&9h3HyJ2+@hm*1Mz!Nmq+SCy8tyXxQm)>}{g zp}!cRWz5Lu0u?aR#=+g+v)`X)Z!nCV*Tqii+%qqBGTU{{J0zoM&=ljEMuX~egQE1pFZFSz=nAX~V~#3}$KywzIBS2i^V`o{9>1+QIF+i{1AS_T zOJ)`6`gE@Heh0*?gA32ReMQ4^IH}_b)T;S-LpEv@Hbd(*89(#sdv4=hW#9Y#hjdrj zGIOP|;~f6n&p&{-Kjh(;Q1luw_R&thfdof|`yM#;z7OKczx6td6q{k&6w~GDb@hASm{rr1wb%W-eXB|ZrM2zA7hfdr7Scbp| z`qB(sR zI7bTN&45SJ+zNSD!@F{$=ACz+`tXzXpw*e0S-?0)My2b3Y>oK#B0tQovB_$UK2`4_rq=T>`69gymOJUUE5C+AUf{%Wzd@T@-x}%Q% zKDg1=QKiS??`n8gt`>XW2mQ@}qAMsQVtLt-473e)r`qq*U2XyOl(=<*j5tEB*n*4| zxx+ZEZwph*z%+83@RFqh*mA;gd?_q9yNAeHB^4{eN8Wqe-dAI;_RPY+zk_R@Ros~h9Gu-w*U2_kej8|gI64mS*U@S@K7NtkSJg& z#Bjw#Lg0S={aEl~r4rT9r1qPMy{eeE6u>Ks-jdY;;>{|Xi7i9>%ej$~R6(w$K*o+) zA6Q`hnnb&+C~=a^^3uuE0;TDQiU9dRT+E_qw=aq5>5bTe-KxlDOkqa1@3F{si=>bnA47WtiI#H&%_i0Ujy!TRPhw>&4GI)PpLq%UGd|aYL8c@@@WH^O zS?G2)BuIIHP6#Q6$|Rh3m9d8ttc#~J-Ku~L#gqajKH-<5(dZ~Imh^1PfES*>O>YK- 
z=U)3=_`ZhQf2yl!?%0C{9By*{C3{^xsr2AzxN_uR1egg$`+N3U&?}exuT43KuYcPA z)Rc=|efZFc3Ob6pFx>&A3ByR`==?M_I5JZvL5^*e<9jUXvymRT6gBquzWQbUHJ?s7 z^2}SdUvrqtJF>+g1hFxPU>=TQ-Tu+n_-4SV#44(21e&`*GY{~)9_ybk$;-O-0aC6a z9}LVVJrPv^C8ZG!7$+Rb^tqk%H*!T#P&{P=x%wDSHw51(V!5wVqb+cpXMZTL{SnF_ z9c8HId*98!=jIJ>bhazEA;AuO>*-4`J%@KLU^nQI<75Mo3~mf=?;3l!dii<&;YFk2 z5->Xgo6}QCAOzI|iZ}?JeS{pQ?pP3DE6|Vuy4Ft)0CcjSUjXm1VVsmV!-=P1WN?)A2nx)QqZ48x8?Q(PM?T#k zZzj|z0D*}2zy0jh|K;z!hM)Ey^{75YYwx7E&G*(up}vyR!6Cw{S8x84|Ly$cch~=R z@nWiILBtqJ$QlmW)wRbedzYzmf(1r~?XKcgETAz5*>ax7OmYgECx~QlbOI-`H(<>) zgdoRJ(dCf949EW7sUQBte?Q_e&seO&mh|=WH@@`6OAkJL>OGH~&L18Z@liq)@ZTwI zf6uM_DEcTCjjM(mYLm^xlGx{VG3;$y|NI}kZoeF%deCnATXc~O0(o%t`1-Y*&;8M5 z`~9{K=wc$Dhg6x*h}NAhNy05keP!w%$+W4+;3xn&fv|5A0Vn>;|Lj>z><|G#{qQU> zGBob*+l}?HA2|2u<7dvD!%Y!XF)2-RAtP8Z?dL`IbUc3Dt(mcqbK3;S3fKgLdSZa1j@IXrs^DOb3J>-~4r;XT^$ zvZ%$X93A6?W73d@7AJmiju8hvIfev-3_*)IpG2-XCsu!%dI6m57zNJ1dj0%wy`9!o zrivXL@>WFE#T0@{!`>m`D<*+XbGcXKibwhIQHw!w`UF%HPQWY#5eNx`PD{{LLGBGj zrWWGDyF4qZiAiSTN9#ooFQ*n$%o8k=4N!2( z1R?MbAp~q}N8NsrDHoG~GeomA6BZ;KQH7$%S`FqaCU@2oa#Y}DBw~A|5SgtI!BSDB zy$KN%q;+Akx#nYe;gTcZq;yBuOuAJtW#iJ6hpKx{Ug%c|s_q3?x9$r4J;#Z9l@~Mg zsq-c!AwW50Co4|@O=}Yt$sGyM6U+`(6cVVwTrvhXjtK~6z?l_9;TT0wkYaRBI3qzy zlhd7T#zfvy1$4F;)B;$6zyjnrSPqg73$)3(kNQ%GS!76%DHHBch>3Tw_{a-d9Bh!| z0Fm&lPjlA6z#E7OKYBmIs_pHJ0C{YR8$%X>4+Jp^CrHF|yFtoT&44XA>&US%ed9=g zhaY)aizkyd$_W7{K045lka=Rnd9ex@H1}I>Ab;CTp|gbUfYiH1v?UtAGSLEyr52)~ zDjX~hUeD)wiXN(`Xab!cObpxd%G7lhsDK_A1N1=z1y-vq9Ocn~2n-4@8gwGlRm76R z31>SF4}wcujwyE}U}Z;%ahA5$1e0uzzDiAKfmb&a3#q1{6IT|r!l<1D%bArOl~REVY+gG5$7nN zi6j@zrO{Id%@bb=15{2y=0;acaC9XnS~UQI7LH>lXn(5Y$A0?0Kl`bBA9$ejL+LR_ zd)@S^;+xmM^#5J_%I{v}&j+<@*fZAWXod!%!2>6<9L@_xAqUuRV-c`Wm7&Pr5iPgs*`0Pcs}= zH{l)i@or+VhH?}3zvqMJe&i?b2PGGt>D)+S7#Y`Sfc6Oez2A3dyn&!HQ!+~7oS)*B z6~S*H%o^zY0Wvf~h$<6Vha;!Ir8B=VWmbaD%o;4G(HDR`6yvbK0nGOI9)I82?|br2 z3jz+(@drFs9aX(i8*Sd+BON39Q$Kd!o%it7CB83FP)r6G8E0fjf_rDroqYIF|C5CR z=U0kpjwq@tB+vrHnuBGBJ<#aB8i=FFFo?0gx}o@_g@M9L2MHtVjD*m_K&5aB8BYJf 
zABwvQz1;F)_f&q$9@5R*sNB)t3a*r+$HO#q>203|PyXrg4o!lU=@@_t=9S1caHV{g zAAam?=C$wifuMu6qbO8d*VlAZai2&=tgG*G6!aJRd1aqCj#INplo1t!njJ7-mM5qL zlz;g9?=0x8g|?FM9L3>n*v8=%x6E9YeBuMEw)SBEuteR7u7esX$&SLAZQnk|9x!$T+nEPA9<8nVwmkKyVV7t#o^fJ!*+nYyi?i-?>eh z6e$TEEn5Jeefu5dR2+AdIhsG5No|RWLrO_M+?b{rEtD58_*KApBr-v`p}3Y6xysHdASM>ThHcWl^hWGga{`b>t9%v?eL6qN5e-TeL+U-9~ERm zI|2Yake+$^*52^b3-P&ZFR*p$k}cM4z_U0a=8cRlS>#n+bW#67G9rWOC zUMaBXrve^yH18_0KyVV70ze^%@3E{=@0D!kL_!c1z=qxqj-CZ0K1LP<04pY)_V=Ft z^5S<*NqLCCjVsTp9q_{WmBi$1u{zyyoP{!4o7($Szy62s1WqExh;11EJ0(C|thLp+ zC~v&Nw+{`5m{s8y1-|&8lO~BEJG-kWBa>2}6Er1mEPoX+YiOimln)6&KR71cB7qjv zSftS{ncsQt>N8Ir{obnP#o0o82hrxCe#qFiA>(*s7RqSq^u>Sk*1H$|Erm{uex@RV zJMJp`W3Bj$I({#JPDus;iE>0^YtZ$}yUIYRJSYI?u!{pf2S|&qB!`C6EZUm*E}eUKIK?8-l%Hqm=%gElIT5L7M^CKlP_S{&%V=<77LnEY&k8 zRue0b1dPLTc~`l_-b40#zy0<@?>YV0_xQ8MPTzG>p$(F~Nx^>kqCKaTKQEj~X4r<~ z;o&sRXsx!3_Kh#S{ki|+e7>K;NU6xEfOh{BT&50QdgsQszKS102SZFL5Y!LX6TqY} z<1$qcF&RD%L2nJ|2-}vff`~S=t{DwPwpFzTJ)Q*x>-oaEwgd(jFU-J%}?fA&a$^yVL*)Sy)R7HS3 z=tR)|KI%)~=C@n~p^jLUr9|b?4)#Sfg2)lX7_kq;30ihZadYt`H{t{=U6!)j%&?G? 
zm_pDICmdT#4n}6m^tFK?xq=G9l!pjaMvz5(PB6{UaQN`X5>O=_gn}}Rj6L9l3?MO!I!Xgf#CJBc>{{1<4$UoK5Au)9pgNA$eM$@(N3#Oa>Fc3iK}XG`Xuv zl_3ug;mwm)0ugY+kv=}}VCa=WKn6z#f+5U!<4YltAh_YWS7aL#ybUlnl?q9zl2i<2 za1_tXMU=)eTwd&9GSbv@HJlGl_teajhbSa7x}*05qYw3og?Sby(V8al*NYD#~QEQ2`i>i5P$#le@}cPj(;4sSVEX z6^MWnjt+3baT_cSqehV^WD)(n!pvY<(Hut=1F!-?ZeXZ8NQRF%5y&9HmM5WKY|P}f zs3>d4HB(RvRSyvqkf~~{#IfuZMZM2KBn=$$5M`YB!C-_oH@f+9AyXDL}0MaW6Z`sR|jg( zxB36LX~sWug(@}3; zzi|HA4ZXj^={SXMy@A1qd@wn)f4~3rUwlSd+0bqP$n(P&S0^C`Ba0VMg5gQ2GenC5 zKBP;jx?fF>JbVI-S;!$B z)jUFs|5l)9BO~S#tk&{aflv`twa@p~Z3Ji`o*KdILh>=;^VvJ6!$AE*x zgjV#a&pz~%f9avf&IfAd*(GYP^Tbb_jwV`~jJe$nKmVUU_P`@&#>Pygb3 ztO+dw**0-H+N|pr=mAg0di4G0>|KfgU|JHKq_=y5v}Ee-UzCTPTR=ba4RDcNGZ%y* zt2D&^Q7(6{$hJW6O_7|^Sd|lV!Jt#S8d-KE3X>AA7P6oE%tNQ=-=|Y9f64_+0( zW2WBO*#%u}^j`agLDm_C=0F3D*1k1RTy9A(Hhk?MDr#|Jj z#h>`(y;aa-Y78x5=d{`K2bV2*OOp@(Nq;99F59XV04s1+%~^KBc;CbJFAFj}fk|$R z{E*8d_835UC$!CI(de~TCCE(Eh@8O_S)7Z_#H!aR(`BA1MQol6rQcK@OwaimM`Y^? zNZaI&1$^keXYRU(KlM`crHOU8Wc4||2!3~tN501%W7K-7;TJm+$}4c9%>W3*d+AQx zai{WI9w-Y0>^j6D?YMO~!V;S4@!$W90Mm)wI`P6wkfK{i~M-g+aU zfC(j-&rEV@ci*#kX@qx(6KnTS>8;?qbz9+IhF5@IDg~Ewr2L9B3vO})dEpvJ>i~Tq#_nQXDdI&Cxxr+TV?zP5lh`-U7c#@V z2GE2HeyjWBpeC6U(i|$%IpG+nd+KczX^<;VY#l)@W@*n}j~W|MKVd~s;Ev1B|MI7M z<;~InT+I;)00c?&F+tFMHemt}W>5kMFf20{bl?FtCVOn)lu6Kh%07k8ZmMuEg;4SQ zOIQ2fNif=s8GRo-@Kv4%Gmpx6&0g?c53RDcTzWTdWCSX}ChcE-H(rmQFe3Yqwbu*K zGireED|D?(BIST+0@pXD?=$7id}RS3n3+@511p4D9aI~=F(=KKc?(6>2Ju>$hM7lX zPewoW`xi4?qhwa}fY=NVU>r3~zi)k&Z}>3>fK1461t244&%T2{9o4Qt`c_#jKGGx)o{}+2`snpOt}adXj$wB zV=eTXzy2B@&r~nvgMIk3^?bm4)DG|EZ4UbDuUxgC8zROgac>ZhdZ6!}s;NR~NHF_nzj66r|KG2-E|u-FL(c3E2k}llq+)}{HtbIQ&Hw9_vM0`G7Lyvw z44@}g*$b~;^PdrsI%P^Kxdnx3Z~rQKC0G=re+GaQN(HXuiBirX@Pi4Hd#-{7vfQ}N z9;7xjor!b^lF2AP)l$2bM>UGv50l0m2LJefdig8A!!L;88M4DxZXMLklm#A(tl&WNOJGC_l=x#41z_0bP zA0FaNBD0+L(K@oX=v>`@_UU)N`a5r3ekb2cZMO z>l#SZQj&?3_O3c3ZZw;m_|?zYjYSqm5ZQu~4ALU#7!tI}f>~(H1teX$exxV|Lp$o8 zb-8ENZmGVeS+WqbPcgF)sBpG}qsN#8AbiH06>6TvL`Jesro2_%1DwW@g^G>0=yQS? 
zn(v=;Wc4SXR*1wMOi^eaJ+}Skb>IR9m#d)ysA2^GIpH|;^&FWg6FL8oTtQ^q$-$#t zsr7I4bHi|QS1qZ5?6ZMfJU|k{BV3#gD0GuMK!Ophd>js+>Cg>Sk-wMG1}Y*Gk#$je*o1L?j| zpn+VbE=APw0ZbB9y2)8OtV5i$v`rt=h!BAff(43(KImPH1ZKK5EKO)JOyvTpa3Uuh zo7}<3h@b#^(Cv_Sn=tFNMsd-QJBdt^8W`lY3s@^AcDRE+TQJv!_&ru=53oSG`m#ri z%&~@Y6VJ_F!(3+J%c82@*F{y*s;)K^%t=dVS4X){|PnNOL} zBwJCzioeqGGTS~3>D-jH1rdylOAUdZ7a;;4I=I{!F-gO?0(-@>Cu?T>|`qG6`PzpumZh2vUb!$YMKfHnD;`i6fJ$4-BwFM zs&aGiqkWGBBuL3cR6%75c-UU0t)7Pkp-HZ)3sR8g7L{X4kY&qa!VfxZGr?HN1x98q zF9nqBJ}Qj@^l4yBJV(DErd3rk$*kt?|0GzH-E@QKr+$wGC`e%+0A^HkbRZ_I zAy#`}7}AYi8D;*^q-G8e&d3I>rgd=I$U)(jo?h zM@%yB?8u6VjyT~+2OkA0Fj#Q}7!EAtIw`ApZ=rZPX>wXT-9S?xaUuYzVg*~hs*p3Y zsey+)VE`)-+z!TUm{_jP6|aNJtRii*B&-Oaft+weUh?S22Z7GSsYrSp2`Pp?%&JN< zfs}&mF-|K`*^$daVuu7o`qT|7o`rcLC=M0eUGh}F6p|%L0Ghi@VC^A5wUPl?%5nin z?*=p54FaFYJ5MVt%o64&Zx*tqFyRLsbU_NU-BrRnlkjp>0VJ5rpn~X#433hcKL>H* z$IPd1O!I*Uj_|1)6rhG6hCBoVoSyy0S``Dp56G5;TyM#W1Z*5_O_BG3T<%)~skU)r zsg!49&JCfmRbHJ~HRVK73d1sE*j7#ubwyU8f<$96K*kS;>O`J(?+Q4ikWE4NW7Zk! zCnN!iID9ILau=`)#)y~Pv?@{mjeee>MYUAI0^-AkY|NCYn=)wiOd3be)k(y$0L_9| znY7L8o!PSf3^U4ZWnw|fi54)c6cD`4dkD!qV9o*@J;vjL6G`rz=>P2^Ub6a>3#u;Dn?1&Iw0napGf}6dU>-w1Jd#-e`y3YCQmL zr4pM#`%(xwKiHou32M8T^@4ti3W}FOW2TQ8iJ487#b&lJ32r=uG^V)Kp4~bL?HIA zApccJ3s+YQ#R|f3!i?oCnJyrxT|&zQ8H8uR%1)T(nduCRdx|>9_DhlClwa3^H+@93 z;zgrS0ks$_sCYx3JCe-mML%T8;OIb1gr&GbiWP*P&k16TIQvS&ol*R&kQQzeUanJO z2C`}}mv>c4N?QkjSV>Y~>@z_O<_{s|YGs&_wu!a_pr=>kL}QhM;YS74FeH)%(SZO{ zM3(4a!-olu4shZlPW%`uob4dEsv_CoMUS;wm8%7UZtWh_A8N#n#a8AWEN!@eSqIT{ zmrS+`yh*QRu{B%cur5%B8I=YKSi(^qKm$3^$gP1P!D7Au$>8WrO!zSXra1zg##QD7 z0-CsC$X6hfYxA^=_V&s$^N(f-wCLN63T7uSH(9vgR*P!P_ zV4!L{%Bw63p%`@>#VlLsEXgX< zGL={SXxv6lXmQwa@^k~Fw5v-NI0=fR%9EL!-&X~;9c8gBJz&pfKZCgf3nA@`QHBv= zNYG+VI3m9WOLoWsh*mv4VE_)PHo?K$#iZ1|WEkc#PZ*KU5)x1$Rm@#OE21a>dbDHt zbOY33w=U)h6(bGf3M@XJBzcReJcKw(b(xoIoj#pO?o7@*037nX6ek+VoYN>L9UeF| zM^HaTmT8WIAB_quQpg;~i5~+&$_|3+m!Ps9kV$$2F+lg%yZ{AbJ;;i8r z)ld)uXRSaaD~Q~5vfUbD+%}G0Y%~eEA z$rYGdv4ZRn_reOEK&hxrrZt+R568v~gOY2O5*0E%Eiltb=64n2%K} 
zIT>`-9?7tH0TW&)1)N=MrZq<313@|mDA@7Nj5ZDixtbNTEb4gSc*KG)YshC(Y_chx z-hJ}2&o{*@xIso_+BgAQmtxe`QK2VNDm$~%1uQ`&;KF1B-TpS5?8aL80_b4OCg6fucE0SP>!Mk~0c!4fGJ!HKfgqHn zK?7L@APIpjZpzAJykgRuqX;$dT9x4d864~VD{^HzNI(>L1a&m$*j)>9wetqS9~S+YPaQNZ~vlC1Y5)?XP9#`^kxjh5_RjAf1P+L)Y+CjO`Od&~?*A7J&vuX%xXCNh5 zBLfL@tW5Weg!0ou0!0fSpXUjUOb)7K1CA!l4&kI_5Lyia4RlX3wF^!e&f1H?j6zF7 h^JI?=47b}l{Qn7T*uqLEg@6D6002ovPDHLkV1lB5uqXfk literal 0 HcmV?d00001 diff --git a/assets/icons/unload.svg b/assets/icons/unload.svg new file mode 100644 index 00000000..f1bd6540 --- /dev/null +++ b/assets/icons/unload.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/components/ModelSelector.vue b/components/ModelSelector.vue index a9f67d76..9ca65fa8 100644 --- a/components/ModelSelector.vue +++ b/components/ModelSelector.vue @@ -33,11 +33,11 @@ - diff --git a/entrypoints/settings/components/CollapsibleSection.vue b/entrypoints/settings/components/CollapsibleSection.vue new file mode 100644 index 00000000..0f2dd26c --- /dev/null +++ b/entrypoints/settings/components/CollapsibleSection.vue @@ -0,0 +1,45 @@ + + + diff --git a/entrypoints/settings/components/DebugSettings/index.vue b/entrypoints/settings/components/DebugSettings/index.vue index c238aa63..2c5b004b 100644 --- a/entrypoints/settings/components/DebugSettings/index.vue +++ b/entrypoints/settings/components/DebugSettings/index.vue @@ -76,37 +76,6 @@ dropdownAlign="left" /> -
- Enable Context Window Size (Num ctx) - -
-
-
Num ctx
- -
Default first token timeout (ms)
() const userConfig = await getUserConfig() -const numCtx = userConfig.llm.numCtx.toRef() -const enableNumCtx = userConfig.llm.enableNumCtx.toRef() const translationSystemPrompt = userConfig.translation.systemPrompt.toRef() const chatSystemPrompt = userConfig.chat.systemPrompt.toRef() const enableReasoning = userConfig.llm.reasoning.toRef() @@ -663,6 +630,7 @@ const simulateClickOnLink = userConfig.browserUse.simulateClickOnLink.toRef() const articles = ref<{ type: 'html' | 'pdf', url: string, title: string, content: string, html?: string, fileName?: string, parser: string }[]>() const modelProviderOptions = [ { id: 'ollama' as const, label: 'Ollama' }, + { id: 'lm-studio' as const, label: 'LM Studio' }, { id: 'web-llm' as const, label: 'Web LLM' }, ] diff --git a/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue b/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue new file mode 100644 index 00000000..86d4cf79 --- /dev/null +++ b/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue @@ -0,0 +1,309 @@ + + + diff --git a/entrypoints/settings/components/GeneralSettings/RunningModels/Card.vue b/entrypoints/settings/components/GeneralSettings/Blocks/ModelManagement/Card.vue similarity index 63% rename from entrypoints/settings/components/GeneralSettings/RunningModels/Card.vue rename to entrypoints/settings/components/GeneralSettings/Blocks/ModelManagement/Card.vue index 958c7f9d..db8980d8 100644 --- a/entrypoints/settings/components/GeneralSettings/RunningModels/Card.vue +++ b/entrypoints/settings/components/GeneralSettings/Blocks/ModelManagement/Card.vue @@ -1,15 +1,36 @@ @@ -104,7 +104,7 @@ -import { ref } from 'vue' +import { computed, ref } from 'vue' import ModelLogo from '@/components/ModelLogo.vue' import Selector from '@/components/Selector.vue' @@ -132,23 +133,38 @@ import StatusBadge from '@/components/StatusBadge.vue' import Button from '@/components/ui/Button.vue' import 
Divider from '@/components/ui/Divider.vue' import Text from '@/components/ui/Text.vue' -import { OLLAMA_TUTORIAL_URL } from '@/utils/constants' +import { LM_STUDIO_SEARCH_URL, LM_STUDIO_TUTORIAL_URL, OLLAMA_SEARCH_URL, OLLAMA_TUTORIAL_URL } from '@/utils/constants' import { formatSize } from '@/utils/formatter' import { useI18n } from '@/utils/i18n' -import { PREDEFINED_OLLAMA_MODELS } from '@/utils/llm/predefined-models' +import { PREDEFINED_LM_STUDIO_MODELS, PREDEFINED_OLLAMA_MODELS } from '@/utils/llm/predefined-models' -import DownloadConfirmModal from './OllamaDownloadModal.vue' +import DownloadConfirmModal from './BackendDownloadModal.vue' +const props = defineProps<{ + endpointType: 'ollama' | 'lm-studio' +}>() const emit = defineEmits(['finished']) const { t } = useI18n() -const options = PREDEFINED_OLLAMA_MODELS.map((model) => ({ - id: model.id, - label: model.name, - value: model, -})) +const options = computed(() => { + const models = props.endpointType === 'ollama' ? PREDEFINED_OLLAMA_MODELS : PREDEFINED_LM_STUDIO_MODELS + return models.map((model) => ({ + id: model.id, + label: model.name, + value: model, + })) +}) -const tutorialUrl = OLLAMA_TUTORIAL_URL +const tutorialUrl = computed(() => { + if (props.endpointType === 'ollama') return OLLAMA_TUTORIAL_URL + else if (props.endpointType === 'lm-studio') return LM_STUDIO_TUTORIAL_URL + else return '' +}) +const searchUrl = computed(() => { + if (props.endpointType === 'ollama') return OLLAMA_SEARCH_URL + else if (props.endpointType === 'lm-studio') return LM_STUDIO_SEARCH_URL + else return '' +}) const selectedModel = ref() const modelToDownload = ref() diff --git a/entrypoints/sidepanel/components/Onboarding/BackendSelectionTutorialCard.vue b/entrypoints/sidepanel/components/Onboarding/BackendSelectionTutorialCard.vue new file mode 100644 index 00000000..bc6fbb60 --- /dev/null +++ b/entrypoints/sidepanel/components/Onboarding/BackendSelectionTutorialCard.vue @@ -0,0 +1,206 @@ + + + diff --git 
a/entrypoints/sidepanel/components/Onboarding/OllamaTutorialCard.vue b/entrypoints/sidepanel/components/Onboarding/OllamaTutorialCard.vue deleted file mode 100644 index f63ade37..00000000 --- a/entrypoints/sidepanel/components/Onboarding/OllamaTutorialCard.vue +++ /dev/null @@ -1,138 +0,0 @@ - - - diff --git a/entrypoints/sidepanel/components/Onboarding/index.vue b/entrypoints/sidepanel/components/Onboarding/index.vue index fcab1b73..c68fba1a 100644 --- a/entrypoints/sidepanel/components/Onboarding/index.vue +++ b/entrypoints/sidepanel/components/Onboarding/index.vue @@ -32,8 +32,8 @@ v-if="panel === 'tutorial'" class="bg-white rounded-lg overflow-hidden grow flex flex-col justify-between font" > - -
@@ -63,14 +64,14 @@ import ExhaustiveError from '@/components/ExhaustiveError.vue' import Logo from '@/components/Logo.vue' import ScrollContainer from '@/components/ScrollContainer.vue' import { useI18n } from '@/utils/i18n' -import { useOllamaStatusStore } from '@/utils/pinia-store/store' +import { useLLMBackendStatusStore } from '@/utils/pinia-store/store' import { getUserConfig, TARGET_ONBOARDING_VERSION } from '@/utils/user-config' import { showSettings } from '../../../../utils/settings' import { Chat } from '../../utils/chat' import { welcomeMessage } from '../../utils/chat/texts' -import OllamaModelDownloader from './OllamaModelDownloader.vue' -import OllamaTutorialCard from './OllamaTutorialCard.vue' +import BackendModelDownloader from './BackendModelDownloader.vue' +import BackendSelectionTutorialCard from './BackendSelectionTutorialCard.vue' import SloganCard from './SloganCard.vue' import WebLLMTutorialCard from './WebLLMTutorialCard.vue' @@ -78,17 +79,19 @@ const isFirefox = import.meta.env.FIREFOX const { t } = useI18n() const userConfig = await getUserConfig() const chat = await Chat.getInstance() -const ollamaStatusStore = useOllamaStatusStore() +const llmBackendStatusStore = useLLMBackendStatusStore() const endpointType = userConfig.llm.endpointType.toRef() const onboardingVersion = userConfig.ui.onboarding.version.toRef() const panel = ref<'tutorial' | 'model-downloader'>('tutorial') +const downloadEndpointType = ref<'ollama' | 'lm-studio'>('ollama') const isShow = computed(() => { return onboardingVersion.value !== TARGET_ONBOARDING_VERSION }) -const onOllamaInstalled = async () => { - endpointType.value = 'ollama' - const modelList = await ollamaStatusStore.updateModelList() +const onBackendInstalled = async (backend: 'ollama' | 'lm-studio') => { + endpointType.value = backend + downloadEndpointType.value = backend + const modelList = backend === 'ollama' ? 
await llmBackendStatusStore.updateOllamaModelList() : await llmBackendStatusStore.updateLMStudioModelList() if (modelList.length === 0) { panel.value = 'model-downloader' } @@ -105,8 +108,8 @@ const onOpenSettings = async () => { const onModelDownloaderFinished = async () => { endpointType.value = 'ollama' - await ollamaStatusStore.updateConnectionStatus() - await ollamaStatusStore.updateModelList() + await llmBackendStatusStore.updateOllamaConnectionStatus() + await llmBackendStatusStore.updateOllamaModelList() close() } @@ -135,10 +138,10 @@ const close = () => { onMounted(async () => { if (isShow.value) { - const success = await ollamaStatusStore.updateConnectionStatus() - if (success) { - onOllamaInstalled() - } + const ollamaSuccess = await llmBackendStatusStore.updateOllamaConnectionStatus() + if (ollamaSuccess) return onBackendInstalled('ollama') + const lmStudioSuccess = await llmBackendStatusStore.updateLMStudioConnectionStatus() + if (lmStudioSuccess) return onBackendInstalled('lm-studio') } }) diff --git a/entrypoints/sidepanel/utils/agent/index.ts b/entrypoints/sidepanel/utils/agent/index.ts index 1fecc893..915243bd 100644 --- a/entrypoints/sidepanel/utils/agent/index.ts +++ b/entrypoints/sidepanel/utils/agent/index.ts @@ -8,7 +8,7 @@ import { AssistantMessageV1 } from '@/types/chat' import { PromiseOr } from '@/types/common' import { Base64ImageData, ImageDataWithId } from '@/types/image' import { TagBuilderJSON } from '@/types/prompt' -import { AbortError, AiSDKError, AppError, ErrorCode, fromError, ModelNotFoundError, ModelRequestError, ParseFunctionCallError, UnknownError } from '@/utils/error' +import { AbortError, AiSDKError, AppError, ErrorCode, fromError, LMStudioLoadModelError, ModelNotFoundError, ModelRequestError, ParseFunctionCallError, UnknownError } from '@/utils/error' import { useGlobalI18n } from '@/utils/i18n' import { generateRandomId } from '@/utils/id' import { InferredParams } from '@/utils/llm/tools/prompt-based/helpers' @@ -408,7 
+408,7 @@ export class Agent { const { t } = await useGlobalI18n() const errorMsg = agentMessageManager.convertToAssistantMessage() errorMsg.isError = true - errorMsg.content = t('errors.model_not_found') + errorMsg.content = t('errors.model_not_found', { endpointType: error.endpointType === 'ollama' ? 'Ollama' : 'LM Studio' }) // unresolvable error, break the loop return false } @@ -416,14 +416,21 @@ export class Agent { const { t } = await useGlobalI18n() const errorMsg = agentMessageManager.convertToAssistantMessage() errorMsg.isError = true - errorMsg.content = t('errors.model_request_error') + errorMsg.content = t('errors.model_request_error', { endpointType: error.endpointType === 'ollama' ? 'Ollama' : 'LM Studio' }) return false } - else if (error instanceof UnknownError) { + else if (error instanceof LMStudioLoadModelError) { const { t } = await useGlobalI18n() const errorMsg = agentMessageManager.convertToAssistantMessage() errorMsg.isError = true - errorMsg.content = t('errors.unknown_error', { error: error.message }) + errorMsg.content = t('errors.unknown_error', { message: error.message.split('\n')[0].slice(0, 50) }) + return false + } + else if (error instanceof AppError) { + const { t } = await useGlobalI18n() + const errorMsg = agentMessageManager.convertToAssistantMessage() + errorMsg.isError = true + errorMsg.content = t('errors.unknown_error', { message: error.message }) return false } return true // continue loop if not fatal error diff --git a/entrypoints/sidepanel/utils/chat/chat.ts b/entrypoints/sidepanel/utils/chat/chat.ts index 14a3735d..28370297 100644 --- a/entrypoints/sidepanel/utils/chat/chat.ts +++ b/entrypoints/sidepanel/utils/chat/chat.ts @@ -7,7 +7,6 @@ import { ContextAttachmentStorage } from '@/types/chat' import { nonNullable } from '@/utils/array' import { ADVANCED_MODELS_FOR_AGENT } from '@/utils/constants' import { debounce } from '@/utils/debounce' -import { AbortError, AppError } from '@/utils/error' import { useGlobalI18n } 
from '@/utils/i18n' import { generateRandomId } from '@/utils/id' import { PromptBasedToolName } from '@/utils/llm/tools/prompt-based/tools' @@ -421,7 +420,7 @@ export class Chat { static createActionEventDispatcher(action: ActionType) { return function actionEvent(data: ActionV1[ActionType], el?: HTMLElement | EventTarget | null) { log.debug('Creating action event', action, data) - ;(el ?? window).dispatchEvent(new ActionEvent(action, data)) + ; (el ?? window).dispatchEvent(new ActionEvent(action, data)) } } @@ -464,19 +463,6 @@ export class Chat { return this.status.value === 'pending' || this.status.value === 'streaming' } - private async errorHandler(e: unknown, msg?: AssistantMessageV1) { - log.error('Error in chat', e) - if (!(e instanceof AbortError)) { - const errorMsg = msg || this.historyManager.appendAssistantMessage() - errorMsg.isError = true - errorMsg.done = true - errorMsg.content = e instanceof AppError ? await e.toLocaleMessage() : 'Unexpected error occurred' - } - else if (msg) { - this.historyManager.deleteMessage(msg) - } - } - statusScope(status: Exclude) { log.debug('statusScope', status) this.status.value = status diff --git a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts index da814ab9..ce498e70 100644 --- a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts +++ b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts @@ -6,7 +6,7 @@ import { markdownSectionDiff } from '@/utils/diff' import { useGlobalI18n } from '@/utils/i18n' import Logger from '@/utils/logger' import { makeIcon, makeRawHtmlTag } from '@/utils/markdown/content' -import { useOllamaStatusStore } from '@/utils/pinia-store/store' +import { useLLMBackendStatusStore } from '@/utils/pinia-store/store' import { Tab } from '@/utils/tab' import { timeout } from '@/utils/timeout' import { isUrlEqual } from '@/utils/url' @@ -265,7 +265,7 @@ export const executeViewImage: AgentToolCallExecute<'view_image'> = async ({ par }, 
}] } - const supportVision = await useOllamaStatusStore().checkCurrentModelSupportVision() + const supportVision = await useLLMBackendStatusStore().checkCurrentModelSupportVision() if (!supportVision) { taskMsg.icon = 'warningColored' taskMsg.summary = `Current model does not support image processing` diff --git a/entrypoints/sidepanel/utils/llm.ts b/entrypoints/sidepanel/utils/llm.ts index eb6d4521..ed649ba1 100644 --- a/entrypoints/sidepanel/utils/llm.ts +++ b/entrypoints/sidepanel/utils/llm.ts @@ -1,3 +1,4 @@ +import { DownloadProgressUpdate } from '@lmstudio/sdk' import type { InitProgressReport } from '@mlc-ai/web-llm' import { TextStreamPart, ToolSet } from 'ai' import type { ProgressResponse } from 'ollama/browser' @@ -92,6 +93,18 @@ export async function* pullOllamaModel(modelId: string, abortSignal?: AbortSigna yield* iter } +export async function* pullLMStudioModel(modelName: string, abortSignal?: AbortSignal) { + const { portName } = await s2bRpc.pullLMStudioModel(modelName) + const aliveKeeper = new BackgroundAliveKeeper() + const port = browser.runtime.connect({ name: portName }) + port.onDisconnect.addListener(() => aliveKeeper.dispose()) + abortSignal?.addEventListener('abort', () => { + port.disconnect() + }) + const iter = readPortMessageIntoIterator(port, { abortSignal }) + yield* iter +} + export async function* initWebLLMEngine(model: WebLLMSupportedModel) { const { portName } = await s2bRpc.initWebLLMEngine(model) const port = browser.runtime.connect({ name: portName }) diff --git a/locales/de.json b/locales/de.json index eba5a6a2..609e4843 100644 --- a/locales/de.json +++ b/locales/de.json @@ -100,17 +100,15 @@ "title": "KI direkt auf Ihrem Gerät – nur für Sie entwickelt" }, "guide": { - "already_installed": "Haben Sie Ollama bereits installiert und gestartet?", + "already_installed": "Bereits installiert und {endpointType} ausgeführt?", "features": { "1": "Führen Sie erweiterte Modelle wie gpt-oss, DeepSeek, Qwen, Llama aus", "2": "Passen Sie 
die Modelle mit voller Steuerung an und wechseln Sie sie", "3": "Ihre Daten bleiben privat auf Ihrem Gerät" }, "follow_our_tutorial": "Folgen Sie unserem Installationshandbuch", - "get_ollama": "Holen Sie sich Ollama", - "install_desc": "Installieren Sie Ollama, um Ihre lokale KI einzurichten.", + "install_desc": "Wie möchten Sie KI lokal ausführen?", "need_help": "Benötigen Sie Hilfe?", - "ollama_desc": "Mit Ollama die volle lokale KI -Macht freischalten.", "setup": "Aufstellen", "step1": "Schritt 1", "download_model_to_begin": "Laden Sie ein Modell herunter, um zu beginnen.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Nicht sicher, welches Sie wählen sollen?", "learn_about_models": "Erfahren Sie mehr über Modelle", "looking_for_more_options": "🔍 Suchen Sie nach weiteren Optionen?", - "browse_more_models": "Weitere Modelle durchsuchen" + "browse_more_models": "Weitere Modelle durchsuchen", + "get_endpoint": "Erhalte {endpointType}", + "lm_studio_select_desc": "Benutzerfreundliche Desktop-Anwendung", + "ollama_select_desc": "Professionelles Entwicklertool", + "unlock_full_power_with": "Entfesseln Sie die volle lokale KI-Leistung mit {endpointType}." }, "webllm_tutorial": { "desc": "Verwenden Sie ein leichtes Modell (QWEN 0,6B) direkt in Ihrem Browser - kein Setup, kein Warten.", @@ -132,7 +134,8 @@ "title": "👋 Willkommen bei **NativeMind**", "body": "NATIVEMIND ist eine Datenschutz-AI-Browser-Erweiterung, die Ihnen hilft, zu chatten, zu suchen und zu übersetzen-alles mit Sprachmodellen für das Gerät.\n\n\nFolgendes können Sie mit Nativemind machen:\n\n\n\n- Chatten Sie über mehrere Registerkarten hinweg, um verschiedene Seiten im Auge zu behalten.\n\n- Durchsuchen Sie das Web direkt im Chat, um einen weiteren Kontext zu erhalten.\n\n- Klicken Sie mit der rechten Maustaste, um einen Teil der Seite sofort zu übersetzen.\n\n- Modelle jederzeit in Einstellungen wechseln oder herunterladen.\n\n\nSie können zunächst die schnellen Aktionen unten ausprobieren." 
}, - "ollama_is_running": "Ollama läuft" + "ollama_is_running": "Ollama läuft", + "backend_is_running": "{endpointType} läuft" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Modelle", "discover_more": "Entdecken Sie weitere Modelle", "no_model": "⚠️ Kein Modell", - "add_model_to_start": "Fügen Sie ein Modell hinzu, um zu beginnen" + "add_model_to_start": "Fügen Sie ein Modell hinzu, um zu beginnen", + "ollama_models": "Ollama Modelle ({count})", + "lmstudio_models": "LM Studio Modelle ({count})" }, "ollama": { "already_installed": "Haben Sie Ollama bereits installiert und gestartet?", @@ -182,19 +187,15 @@ "unload": "Entladen", "running": "Läuft", "expires_in": "Läuft ab in {duration}", - "running_models": "Laufende Modelle" + "running_models": "Laufende Modelle", + "model_management": "Modellverwaltung", + "downloaded_models": "Heruntergeladene Modelle" }, "prompts": { "chat_system_prompt": "Chat -System Eingabeaufforderung", "title": "Prompt", "translation_system_prompt": "Übersetzungs-System-Prompt" }, - "provider": { - "title": "Modellanbieter" - }, - "provider_model": { - "title": "Anbieter" - }, "quick_actions": { "description": "Richten Sie schnelle Aktionen ein, um Ihre bevorzugten Eingabeaufforderungen schneller auszuführen-aus dem neuen Chat oder dem Rechtsklickmenü.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} Quant", "thinking": "Denken", "no_active_models": "Ollama verbunden, aber keine aktiven Modelle", - "not_connected_to_ollama": "Nicht mit Ollama verbunden" - } + "not_connected_to_ollama": "Nicht mit Ollama verbunden", + "lm_studio_no_active_models": "LM Studio verbunden, aber keine aktiven Modelle", + "not_connected_to_lm_studio": "Nicht mit LM Studio verbunden" + }, + "delete_model_confirm": "Sind Sie sicher, dass Sie das Modell \"{model}\" löschen möchten?" }, "title": "Einstellungen", "model_downloader": { @@ -248,7 +252,8 @@ "downloading": "Ihr Modell wird heruntergeladen. 
Bitte warten…", "downloading_model": "Lade \"{model}\" herunter", "retry": "Wiederholen", - "unable_to_download": "Download kann nicht gestartet werden" + "unable_to_download": "Download kann nicht gestartet werden", + "could_not_connect": "Wir konnten keine Verbindung zu {endpointType} herstellen. Bitte stellen Sie sicher, dass {endpointType} läuft und versuchen Sie es erneut." }, "webllm_downloader": { "description": "Um den lokalen Modus zu verwenden, müssen Sie das {model}-Modell ({size}) herunterladen. Möchten Sie es jetzt herunterladen?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Sterne auf GitHub", "goto_website": "Gehen Sie zur Website" - } + }, + "providers": { + "lm_studio": { + "title": "LM Studio konfigurieren", + "already_installed": "Bereits LM Studio installiert und ausgeführt?", + "model_management_desc": "Das Herunterladen, Laden und Löschen von Modellen muss in der LM Studio Desktop-App erfolgen. Dies zeigt nur den aktuellen Ausführungsstatus an." + }, + "ollama": { + "title": "Ollama konfigurieren" + } + }, + "get_lm_studio": "Holen Sie sich LM Studio", + "delete": "Löschen", + "lm_studio": { + "server_address": "Serveradresse", + "server_address_desc": "LM Studio Server-API-Endpunkt" + }, + "webllm_desc_lm_studio": "Sie verwenden jetzt WebLLM: qwen3:0.6b für einen schnellen Test. Für vollständige Modellunterstützung und bessere Leistung installieren Sie bitte LM Studio." }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "Ups! Etwas ist schiefgelaufen. Bitte überprüfen Sie Ihre Ollama-Verbindung in den Einstellungen und versuchen Sie es erneut.", - "model_request_error": "Ups! Etwas ist schiefgelaufen. Bitte überprüfen Sie Ihre Ollama-Verbindung in den Einstellungen und versuchen Sie es erneut.", - "model_request_timeout": "Anfrage-Timeout. 
Bitte überprüfen Sie Ihre Ollama-Verbindung oder starten Sie eine neue Sitzung, da lange Kontexte die Antwortzeiten beeinträchtigen können.", + "model_not_found": "Hoppla! Etwas ist schiefgelaufen. Bitte überprüfen Sie Ihre {endpointType}-Verbindung in den Einstellungen und versuchen Sie es erneut.", + "model_request_error": "Hoppla! Etwas ist schiefgelaufen. Bitte überprüfen Sie Ihre {endpointType}-Verbindung in den Einstellungen und versuchen Sie es erneut.", + "model_request_timeout": "Anfrage-Timeout, bitte überprüfen Sie Ihre {endpointType}-Verbindung oder erwägen Sie, eine neue Sitzung zu starten, da lange Kontexte die Antwortzeiten beeinträchtigen können.", "timeout_error": "Zeitüberschreitung bei der Operation: {message}", "unknown_error": "Ein unerwarteter Fehler ist aufgetreten: {message}", "webllm_not_supported": "WebLLM wird auf Ihrem Gerät nicht unterstützt.", diff --git a/locales/en.json b/locales/en.json index c2e51b0c..554bcf75 100644 --- a/locales/en.json +++ b/locales/en.json @@ -37,7 +37,9 @@ "running_models": "Running Models", "unload": "Unload", "running": "Running", - "expires_in": "Expires in {duration}" + "expires_in": "Expires in {duration}", + "model_management": "Model Management", + "downloaded_models": "Downloaded Models" }, "choose_model": "Choose Model", "download": "Download", @@ -50,7 +52,9 @@ "title": "Models", "discover_more": " Discover more models", "no_model": "⚠️ No model", - "add_model_to_start": "Add a model to start" + "add_model_to_start": "Add a model to start", + "ollama_models": "Ollama Models ({count})", + "lmstudio_models": "LM Studio Models ({count})" }, "quick_actions": { "title": "Customize Quick Actions", @@ -70,11 +74,15 @@ "join_waitlist": "💼 Want to use it at work? 
{join_waitlist_link} for enterprise updates.", "join_waitlist_link": "Join the waitlist" }, - "provider": { - "title": "Connection" - }, - "provider_model": { - "title": "Configure Ollama" + "providers": { + "ollama": { + "title": "Configure Ollama" + }, + "lm_studio": { + "title": "Configure LM Studio", + "model_management_desc": "Model download, loading, and deletion must be done in the LM Studio desktop app. This only shows current running status.", + "already_installed": "Already installed and run LM Studio?" + } }, "cache": { "title": "Translation Cache", @@ -101,8 +109,11 @@ "quant": "{level} quant", "thinking": "Thinking", "not_connected_to_ollama": "Not connected to Ollama", - "no_active_models": "Ollama connected, but no active models" - } + "no_active_models": "Ollama connected, but no active models", + "lm_studio_no_active_models": "LM Studio connected, but no active models", + "not_connected_to_lm_studio": "Not connected to LM Studio" + }, + "delete_model_confirm": "Are you sure you want to delete the model \"{model}\"?" }, "title": "Settings", "model_downloader": { @@ -113,7 +124,8 @@ "download_model": "Download “{model}“\t", "unable_to_download": "Unable to Start Download", "retry": "Retry", - "could_not_connect_ollama": "We couldn’t connect to Ollama. Please make sure Ollama is running and try again." + "could_not_connect_ollama": "We couldn’t connect to {endpointType}. Please make sure {endpointType} is running and try again.", + "could_not_connect": "We couldn’t connect to {endpointType}. Please make sure {endpointType} is running and try again." }, "webllm_downloader": { "description": "To use Local mode, you need to download the {model} model ({size}). Would you like to download it now?" 
@@ -182,7 +194,14 @@ "header": { "starts_on_github": "Stars on GitHub", "goto_website": "Go to the Website" - } + }, + "get_lm_studio": "Get LM Studio", + "lm_studio": { + "server_address": "Server Address", + "server_address_desc": "LM Studio server API endpoint" + }, + "delete": "Delete", + "webllm_desc_lm_studio": "You are now using WebLLM: qwen3:0.6b for quick tryout. For full model support and better performance, please install LM Studio." }, "chat": { "messages": { @@ -286,15 +305,14 @@ }, "guide": { "step1": "STEP 1", - "install_desc": "Install Ollama to set up your local AI.", - "ollama_desc": "Unlock full local AI power with Ollama.", + "install_desc": "How do you want to run AI locally?", "features": { "1": "Run advanced models like gpt-oss, DeepSeek, Qwen, Llama", "2": "Customize and switch models with full control", "3": "Your data stays private, on your device" }, - "get_ollama": "Get Ollama", - "already_installed": "Already installed and run Ollama?", + "get_endpoint": "Get {endpointType}", + "already_installed": "Already installed and run {endpointType}?", "setup": "Setup", "need_help": "Need help?", "follow_our_tutorial": "Follow our installation guide", @@ -305,7 +323,10 @@ "no_sure_which_one": "🤔 Not sure which one to choose?", "learn_about_models": "Learn about models", "looking_for_more_options": "🔍 Looking for more options?", - "browse_more_models": "Browse more models" + "browse_more_models": "Browse more models", + "ollama_select_desc": "Professional Developer Tool", + "lm_studio_select_desc": "Easy-to-use Desktop App", + "unlock_full_power_with": "Unlock full local AI power with {endpointType}." }, "webllm_tutorial": { "title": "Not ready to install? 
Try it instantly.", @@ -317,7 +338,8 @@ "title": "👋 Welcome to **NativeMind**", "body": "NativeMind is a privacy-first AI browser extension that helps you chat, search, and translate — all powered by on-device language models.\n\nHere’s what you can do with NativeMind:\n\n\n- Chat across multiple tabs to keep track of different pages.\n- Search the web directly within the chat for more context.\n- Right-click to translate any part of the page instantly.\n- Switch or download models anytime in Settings.\n\nYou can start by trying out the quick actions below." }, - "ollama_is_running": "Ollama is running" + "ollama_is_running": "Ollama is running", + "backend_is_running": "{endpointType} is running" }, "context_menu": { "quick_actions": { @@ -337,9 +359,9 @@ }, "errors": { "unknown_error": "An unexpected error occurred: {message}", - "model_request_error": "Oops! Something went wrong. Please check your Ollama connection in settings and try again.", - "model_not_found": "Oops! Something went wrong. Please check your Ollama connection in settings and try again.", - "model_request_timeout": "Request timeout, please check your Ollama connection or consider starting a new session as long contexts may affect response times.", + "model_request_error": "Oops! Something went wrong. Please check your {endpointType} connection in settings and try again.", + "model_not_found": "Oops! Something went wrong. 
Please check your {endpointType} connection in settings and try again.", + "model_request_timeout": "Request timeout, please check your {endpointType} connection or consider starting a new session as long contexts may affect response times.", "timeout_error": "Operation timed out: {message}", "webllm_not_supported": "WebLLM not supported on your device.", "max_characters_error": "Maximum {count} characters allowed" diff --git a/locales/es.json b/locales/es.json index 6d37bec4..e303285b 100644 --- a/locales/es.json +++ b/locales/es.json @@ -111,9 +111,9 @@ } }, "errors": { - "model_not_found": "¡Ups! Algo salió mal. Por favor, verifica tu conexión de Ollama en la configuración e inténtalo de nuevo.", - "model_request_error": "¡Ups! Algo salió mal. Por favor, verifica tu conexión de Ollama en la configuración e inténtalo de nuevo.", - "model_request_timeout": "Tiempo de espera agotado. Por favor verifica tu conexión de Ollama o considera iniciar una nueva sesión, ya que los contextos largos pueden afectar los tiempos de respuesta.", + "model_not_found": "¡Ups! Algo salió mal. Por favor verifica tu conexión de {endpointType} en la configuración e inténtalo de nuevo.", + "model_request_error": "¡Ups! Algo salió mal. 
Por favor verifica tu conexión de {endpointType} en la configuración e inténtalo de nuevo.", + "model_request_timeout": "Tiempo de espera agotado, por favor verifica tu conexión {endpointType} o considera iniciar una nueva sesión ya que los contextos largos pueden afectar los tiempos de respuesta.", "timeout_error": "La operación agotó el tiempo de espera: {message}", "unknown_error": "Se produjo un error inesperado: {message}", "webllm_not_supported": "WebLLM no es compatible con tu dispositivo.", @@ -125,17 +125,15 @@ "title": "IA en el dispositivo, creada solo para ti" }, "guide": { - "already_installed": "¿Ya instalaste y ejecutaste Ollama?", + "already_installed": "¿Ya instalaste y ejecutaste {endpointType}?", "features": { "1": "Ejecuta modelos avanzados como gpt-oss, DeepSeek, Qwen, Llama", "2": "Personaliza y cambia modelos con control total", "3": "Tus datos permanecen privados, en tu dispositivo" }, "follow_our_tutorial": "Sigue nuestra guía de instalación", - "get_ollama": "Obtener Ollama", - "install_desc": "Instala Ollama para configurar tu IA local.", + "install_desc": "¿Cómo quieres ejecutar IA localmente?", "need_help": "¿Necesitas ayuda?", - "ollama_desc": "Desbloquea todo el poder de la IA local con Ollama.", "setup": "Configuración", "step1": "PASO 1", "download_model_to_begin": "Descarga un modelo para comenzar.", @@ -145,7 +143,11 @@ "no_sure_which_one": "🤔 ¿No estás seguro cuál elegir?", "learn_about_models": "Aprende sobre modelos", "looking_for_more_options": "🔍 ¿Buscas más opciones?", - "browse_more_models": "Explorar más modelos" + "browse_more_models": "Explorar más modelos", + "get_endpoint": "Obtener {endpointType}", + "lm_studio_select_desc": "Aplicación de Escritorio Fácil de Usar", + "ollama_select_desc": "Herramienta de Desarrollador Profesional", + "unlock_full_power_with": "Desbloquea todo el poder de la IA local con {endpointType}." 
}, "webllm_tutorial": { "desc": "Usa un modelo ligero (Qwen 0.6b) directamente en tu navegador — sin configuración, sin espera.", @@ -157,7 +159,8 @@ "body": "NativeMind es una extensión de navegador con IA que prioriza la privacidad y te ayuda a chatear, buscar y traducir — todo impulsado por modelos de lenguaje en el dispositivo.\n\nEsto es lo que puedes hacer con NativeMind:\n\n- Chatear en múltiples pestañas para hacer seguimiento de diferentes páginas.\n- Buscar en la web directamente dentro del chat para más contexto.\n- Hacer clic derecho para traducir cualquier parte de la página al instante.\n- Cambiar o descargar modelos en cualquier momento en Configuración.\n\nPuedes empezar probando las acciones rápidas a continuación.", "title": "👋 Bienvenido a **NativeMind**" }, - "ollama_is_running": "Ollama está ejecutándose" + "ollama_is_running": "Ollama está ejecutándose", + "backend_is_running": "{endpointType} está ejecutándose" }, "settings": { "cache": { @@ -193,15 +196,20 @@ "quant": "{level} cuantitativo", "thinking": "Pensando", "no_active_models": "Ollama conectado, pero no hay modelos activos", - "not_connected_to_ollama": "No conectado a Ollama" - } + "not_connected_to_ollama": "No conectado a Ollama", + "lm_studio_no_active_models": "LM Studio conectado, pero no hay modelos activos", + "not_connected_to_lm_studio": "No conectado a LM Studio" + }, + "delete_model_confirm": "¿Estás seguro de que quieres eliminar el modelo \"{model}\"?" 
}, "get_ollama": "Obtener Ollama", "models": { "title": "Modelos", "discover_more": "Descubre más modelos", "no_model": "⚠️ Sin modelo", - "add_model_to_start": "Añadir un modelo para empezar" + "add_model_to_start": "Añadir un modelo para empezar", + "ollama_models": "Modelos de Ollama ({count})", + "lmstudio_models": "Modelos de LM Studio ({count})" }, "ollama": { "already_installed": "¿Ya instalaste y ejecutaste Ollama?", @@ -223,19 +231,15 @@ "unload": "Descargar", "expires_in": "Expira en {duration}", "running": "Ejecutándose", - "running_models": "Modelos en Ejecución" + "running_models": "Modelos en Ejecución", + "model_management": "Gestión de Modelos", + "downloaded_models": "Modelos Descargados" }, "prompts": { "chat_system_prompt": "Sistema de Indicaciones de Chat", "title": "Instrucciones", "translation_system_prompt": "Prompt del sistema de traducción" }, - "provider": { - "title": "Conexión" - }, - "provider_model": { - "title": "Configurar Ollama" - }, "quick_actions": { "description": "Configura Acciones Rápidas para ejecutar tus instrucciones favoritas más rápido -- desde Nuevo Chat o el menú contextual.", "edit": { @@ -273,7 +277,8 @@ "download": "Descargar", "retry": "Reintentar", "unable_to_download": "No se puede iniciar la descarga", - "could_not_connect_ollama": "No pudimos conectar con Ollama. Por favor, asegúrate de que Ollama esté ejecutándose e inténtalo de nuevo." + "could_not_connect_ollama": "No pudimos conectar con Ollama. Por favor, asegúrate de que Ollama esté ejecutándose e inténtalo de nuevo.", + "could_not_connect": "No pudimos conectar con {endpointType}. Por favor, asegúrate de que {endpointType} esté ejecutándose e inténtalo de nuevo." }, "webllm_downloader": { "description": "Para usar el modo Local, necesitas descargar el modelo {model} ({size}). ¿Te gustaría descargarlo ahora?" 
@@ -342,7 +347,24 @@ "header": { "starts_on_github": "Estrellas en GitHub", "goto_website": "Ir al Sitio Web" - } + }, + "providers": { + "lm_studio": { + "title": "Configurar LM Studio", + "already_installed": "¿Ya instalaste y ejecutaste LM Studio?", + "model_management_desc": "La descarga, carga y eliminación de modelos debe realizarse en la aplicación de escritorio de LM Studio. Esto solo muestra el estado de ejecución actual." + }, + "ollama": { + "title": "Configurar Ollama" + } + }, + "get_lm_studio": "Obtener LM Studio", + "delete": "Eliminar", + "lm_studio": { + "server_address": "Dirección del Servidor", + "server_address_desc": "Punto final de la API del servidor LM Studio" + }, + "webllm_desc_lm_studio": "Ahora estás usando WebLLM: qwen3:0.6b para una prueba rápida. Para soporte completo del modelo y mejor rendimiento, por favor instala LM Studio." }, "common": { "cancel": "Cancelar", diff --git a/locales/fr.json b/locales/fr.json index 2c1103db..0977dbb8 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -100,17 +100,15 @@ "title": "IA sur appareil, conçue spécialement pour vous" }, "guide": { - "already_installed": "Ollama est déjà installé et en cours d'exécution ?", + "already_installed": "Déjà installé et exécuté {endpointType} ?", "features": { "1": "Exécutez des modèles avancés comme gpt-oss, DeepSeek, Qwen, Llama", "2": "Personnalisez et changez de modèles avec un contrôle total", "3": "Vos données restent privées, sur votre appareil" }, "follow_our_tutorial": "Suivez notre guide d'installation", - "get_ollama": "Obtenir Ollama", - "install_desc": "Installez Ollama pour configurer votre IA locale.", + "install_desc": "Comment voulez-vous exécuter l'IA localement ?", "need_help": "Besoin d'aide ?", - "ollama_desc": "Libérez toute la puissance de l'IA locale avec Ollama.", "setup": "Configuration", "step1": "ÉTAPE 1", "download_model_to_begin": "Téléchargez un modèle pour commencer.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Pas sûr lequel 
choisir ?", "learn_about_models": "En savoir plus sur les modèles", "looking_for_more_options": "🔍 Vous cherchez plus d'options ?", - "browse_more_models": "Parcourir plus de modèles" + "browse_more_models": "Parcourir plus de modèles", + "get_endpoint": "Obtenir {endpointType}", + "lm_studio_select_desc": "Application de bureau facile à utiliser", + "ollama_select_desc": "Outil de Développement Professionnel", + "unlock_full_power_with": "Débloquez toute la puissance de l'IA locale avec {endpointType}." }, "webllm_tutorial": { "desc": "Utilisez un modèle léger (Qwen 0.6b) directement dans votre navigateur - sans configuration, sans attente.", @@ -132,7 +134,8 @@ "title": "👋 Bienvenue sur **NativeMind**", "body": "NativeMind est une extension de navigateur IA qui privilégie la confidentialité et vous aide à discuter, rechercher et traduire - le tout alimenté par des modèles de langage fonctionnant sur votre appareil.\n\n\nVoici ce que vous pouvez faire avec NativeMind :\n\n\n\n- Discuter à travers plusieurs onglets pour suivre différentes pages.\n\n- Rechercher sur le Web directement dans le chat pour plus de contexte.\n\n- Cliquer avec le bouton droit pour traduire instantanément n'importe quelle partie de la page.\n\n- Changer ou télécharger des modèles à tout moment dans les Paramètres.\n\n\nVous pouvez commencer par essayer les actions rapides ci-dessous." 
}, - "ollama_is_running": "Ollama est en cours d'exécution" + "ollama_is_running": "Ollama est en cours d'exécution", + "backend_is_running": "{endpointType} est en cours d'exécution" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Modèles", "discover_more": "Découvrez plus de modèles", "no_model": "⚠️ Aucun modèle", - "add_model_to_start": "Ajouter un modèle pour commencer" + "add_model_to_start": "Ajouter un modèle pour commencer", + "ollama_models": "Modèles Ollama ({count})", + "lmstudio_models": "Modèles LM Studio ({count})" }, "ollama": { "already_installed": "Ollama est déjà installé et en cours d'exécution ?", @@ -182,19 +187,15 @@ "unload": "Décharger", "expires_in": "Expire dans {duration}", "running": "En cours d'exécution", - "running_models": "Modèles en cours d'exécution" + "running_models": "Modèles en cours d'exécution", + "model_management": "Gestion des modèles", + "downloaded_models": "Modèles Téléchargés" }, "prompts": { "chat_system_prompt": "Instruction système de chat", "title": "Instructions", "translation_system_prompt": "Prompt système de traduction" }, - "provider": { - "title": "Connexion" - }, - "provider_model": { - "title": "Configurer Ollama" - }, "quick_actions": { "description": "Configurez des actions rapides pour exécuter vos instructions préférées plus rapidement - depuis le nouveau chat ou le menu contextuel.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} quant", "thinking": "Réflexion", "no_active_models": "Ollama connecté, mais aucun modèle actif", - "not_connected_to_ollama": "Pas connecté à Ollama" - } + "not_connected_to_ollama": "Pas connecté à Ollama", + "lm_studio_no_active_models": "LM Studio connecté, mais aucun modèle actif", + "not_connected_to_lm_studio": "Pas connecté à LM Studio" + }, + "delete_model_confirm": "Êtes-vous sûr de vouloir supprimer le modèle \"{model}\" ?" 
}, "title": "Paramètres", "model_downloader": { @@ -248,7 +252,8 @@ "download": "Télécharger", "retry": "Réessayer", "unable_to_download": "Impossible de démarrer le téléchargement", - "could_not_connect_ollama": "Nous n'avons pas pu nous connecter à Ollama. Veuillez vous assurer qu'Ollama est en cours d'exécution et réessayer." + "could_not_connect_ollama": "Nous n'avons pas pu nous connecter à Ollama. Veuillez vous assurer qu'Ollama est en cours d'exécution et réessayer.", + "could_not_connect": "Nous n'avons pas pu nous connecter à {endpointType}. Veuillez vous assurer que {endpointType} est en cours d'exécution et réessayer." }, "webllm_downloader": { "description": "Pour utiliser le mode Local, vous devez télécharger le modèle {model} ({size}). Souhaitez-vous le télécharger maintenant ?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Étoiles sur GitHub", "goto_website": "Aller sur le site Web" - } + }, + "providers": { + "lm_studio": { + "title": "Configurer LM Studio", + "already_installed": "Déjà installé et exécuté LM Studio ?", + "model_management_desc": "Le téléchargement, le chargement et la suppression de modèles doivent être effectués dans l'application de bureau LM Studio. Ceci affiche uniquement l'état d'exécution actuel." + }, + "ollama": { + "title": "Configurer Ollama" + } + }, + "get_lm_studio": "Obtenez LM Studio", + "delete": "Supprimer", + "lm_studio": { + "server_address": "Adresse du serveur", + "server_address_desc": "Point de terminaison de l'API du serveur LM Studio" + }, + "webllm_desc_lm_studio": "Vous utilisez maintenant WebLLM : qwen3:0.6b pour un essai rapide. Pour un support complet des modèles et de meilleures performances, veuillez installer LM Studio." }, "context_menu": { "quick_actions": { @@ -336,11 +358,11 @@ } }, "errors": { - "model_not_found": "Oups ! Quelque chose s'est mal passé. Veuillez vérifier votre connexion Ollama dans les paramètres et réessayer.", - "model_request_error": "Oups ! 
Quelque chose s'est mal passé. Veuillez vérifier votre connexion Ollama dans les paramètres et réessayer.", + "model_not_found": "Oups ! Quelque chose s'est mal passé. Veuillez vérifier votre connexion {endpointType} dans les paramètres et réessayer.", + "model_request_error": "Oups ! Quelque chose s'est mal passé. Veuillez vérifier votre connexion {endpointType} dans les paramètres et réessayer.", "timeout_error": "Opération expirée : {message}", "unknown_error": "Une erreur inattendue s'est produite : {message}", - "model_request_timeout": "Délai de requête expiré. Veuillez vérifier votre connexion Ollama ou envisager de démarrer une nouvelle session, car les contextes longs peuvent affecter les temps de réponse.", + "model_request_timeout": "Délai d'attente de la demande dépassé, veuillez vérifier votre connexion {endpointType} ou envisager de démarrer une nouvelle session car les contextes longs peuvent affecter les temps de réponse.", "webllm_not_supported": "WebLLM n'est pas pris en charge sur votre appareil.", "max_characters_error": "Maximum {count} caractères autorisés" }, diff --git a/locales/id.json b/locales/id.json index 90b9b306..f84dc95e 100644 --- a/locales/id.json +++ b/locales/id.json @@ -100,17 +100,15 @@ "title": "AI di perangkat, dibuat khusus untuk Anda" }, "guide": { - "already_installed": "Sudah menginstal dan menjalankan Ollama?", + "already_installed": "Sudah terinstal dan menjalankan {endpointType}?", "features": { "1": "Jalankan model canggih seperti gpt-oss, DeepSeek, Qwen, Llama", "2": "Sesuaikan dan beralih antar model dengan kontrol penuh", "3": "Data Anda tetap pribadi, tersimpan di perangkat Anda" }, "follow_our_tutorial": "Ikuti panduan instalasi kami", - "get_ollama": "Dapatkan Ollama", - "install_desc": "Instal Ollama untuk mengatur AI lokal Anda.", + "install_desc": "Bagaimana Anda ingin menjalankan AI secara lokal?", "need_help": "Butuh bantuan?", - "ollama_desc": "Buka kekuatan penuh AI lokal dengan Ollama.", "setup": 
"Pengaturan", "step1": "LANGKAH 1", "download_model_to_begin": "Unduh model untuk memulai.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Tidak yakin yang mana yang dipilih?", "learn_about_models": "Pelajari tentang model", "looking_for_more_options": "🔍 Mencari lebih banyak opsi?", - "browse_more_models": "Jelajahi lebih banyak model" + "browse_more_models": "Jelajahi lebih banyak model", + "get_endpoint": "Dapatkan {endpointType}", + "lm_studio_select_desc": "Aplikasi Desktop yang Mudah Digunakan", + "ollama_select_desc": "Alat Pengembang Profesional", + "unlock_full_power_with": "Buka kekuatan AI lokal penuh dengan {endpointType}." }, "webllm_tutorial": { "desc": "Gunakan model ringan (Qwen 0.6b) langsung di browser Anda — tanpa pengaturan, tanpa menunggu.", @@ -132,7 +134,8 @@ "title": "👋 Selamat datang di **NativeMind**", "body": "NativeMind adalah ekstensi browser AI yang mengutamakan privasi dan membantu Anda mengobrol, mencari, dan menerjemahkan — semuanya didukung oleh model bahasa yang berjalan di perangkat Anda.\n\nInilah yang dapat Anda lakukan dengan NativeMind:\n\n- Mengobrol di beberapa tab untuk melacak berbagai halaman.\n- Mencari di web secara langsung dalam obrolan untuk mendapatkan konteks lebih banyak.\n- Klik kanan untuk menerjemahkan bagian mana pun dari halaman secara instan.\n- Beralih atau mengunduh model kapan saja di Pengaturan.\n\nAnda dapat mulai dengan mencoba tindakan cepat di bawah ini." 
}, - "ollama_is_running": "Ollama sedang berjalan" + "ollama_is_running": "Ollama sedang berjalan", + "backend_is_running": "{endpointType} sedang berjalan" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Model", "discover_more": "Temukan lebih banyak model", "no_model": "⚠️ Tidak ada model", - "add_model_to_start": "Tambahkan model untuk memulai" + "add_model_to_start": "Tambahkan model untuk memulai", + "ollama_models": "Model Ollama ({count})", + "lmstudio_models": "Model LM Studio ({count})" }, "ollama": { "already_installed": "Sudah menginstal dan menjalankan Ollama?", @@ -182,19 +187,15 @@ "unload": "Bongkar", "expires_in": "Berakhir dalam {duration}", "running": "Berjalan", - "running_models": "Model yang Berjalan" + "running_models": "Model yang Berjalan", + "model_management": "Manajemen Model", + "downloaded_models": "Model yang Diunduh" }, "prompts": { "chat_system_prompt": "Prompt sistem obrolan", "title": "Prompt", "translation_system_prompt": "Prompt sistem terjemahan" }, - "provider": { - "title": "Koneksi" - }, - "provider_model": { - "title": "Konfigurasi Ollama" - }, "quick_actions": { "description": "Siapkan tindakan cepat untuk menjalankan prompt favorit Anda lebih cepat -- dari Obrolan Baru atau menu klik kanan.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} kuant", "thinking": "Berpikir", "no_active_models": "Ollama terhubung, tetapi tidak ada model yang aktif", - "not_connected_to_ollama": "Tidak terhubung ke Ollama" - } + "not_connected_to_ollama": "Tidak terhubung ke Ollama", + "lm_studio_no_active_models": "LM Studio terhubung, tetapi tidak ada model yang aktif", + "not_connected_to_lm_studio": "Tidak terhubung ke Ollama" + }, + "delete_model_confirm": "Apakah Anda yakin ingin menghapus model \"{model}\"?" 
}, "title": "Pengaturan", "model_downloader": { @@ -248,7 +252,8 @@ "download": "Unduh", "retry": "Coba lagi", "unable_to_download": "Tidak Dapat Memulai Unduhan", - "could_not_connect_ollama": "Kami tidak dapat terhubung ke Ollama. Pastikan Ollama sedang berjalan dan coba lagi." + "could_not_connect_ollama": "Kami tidak dapat terhubung ke Ollama. Pastikan Ollama sedang berjalan dan coba lagi.", + "could_not_connect": "Kami tidak dapat terhubung ke {endpointType}. Pastikan {endpointType} sedang berjalan dan coba lagi." }, "webllm_downloader": { "description": "Untuk menggunakan mode Lokal, Anda perlu mengunduh model {model} ({size}). Apakah Anda ingin mengunduhnya sekarang?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Bintang di GitHub", "goto_website": "Kunjungi Situs Web" - } + }, + "providers": { + "lm_studio": { + "title": "Konfigurasi LM Studio", + "already_installed": "Sudah menginstal dan menjalankan LM Studio?", + "model_management_desc": "Unduhan, pemuatan, dan penghapusan model harus dilakukan di aplikasi desktop LM Studio. Ini hanya menampilkan status yang sedang berjalan saat ini." + }, + "ollama": { + "title": "Konfigurasi Ollama" + } + }, + "get_lm_studio": "Dapatkan LM Studio", + "delete": "Hapus", + "lm_studio": { + "server_address": "Alamat Server", + "server_address_desc": "Endpoint API server LM Studio" + }, + "webllm_desc_lm_studio": "Anda sekarang menggunakan WebLLM: qwen3:0.6b untuk percobaan cepat. Untuk dukungan model lengkap dan performa yang lebih baik, silakan instal LM Studio." }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "Ups! Ada yang salah. Silakan periksa koneksi Ollama Anda di pengaturan dan coba lagi.", - "model_request_error": "Ups! Ada yang salah. Silakan periksa koneksi Ollama Anda di pengaturan dan coba lagi.", - "model_request_timeout": "Waktu permintaan habis. 
Silakan periksa koneksi Ollama Anda atau pertimbangkan untuk memulai sesi baru karena konteks yang panjang dapat memengaruhi waktu respons.", + "model_not_found": "Ups! Ada yang salah. Silakan periksa koneksi {endpointType} Anda di pengaturan dan coba lagi.", + "model_request_error": "Ups! Ada yang salah. Silakan periksa koneksi {endpointType} Anda di pengaturan dan coba lagi.", + "model_request_timeout": "Waktu permintaan habis, silakan periksa koneksi {endpointType} Anda atau pertimbangkan untuk memulai sesi baru karena konteks yang panjang dapat mempengaruhi waktu respons.", "timeout_error": "Operasi waktu habis: {message}", "unknown_error": "Terjadi kesalahan yang tidak terduga: {message}", "webllm_not_supported": "WebLLM tidak didukung pada perangkat Anda.", diff --git a/locales/ja.json b/locales/ja.json index 3f766e7d..33a400af 100644 --- a/locales/ja.json +++ b/locales/ja.json @@ -100,17 +100,15 @@ "title": "あなたのために開発されたオンデバイスAI" }, "guide": { - "already_installed": "すでにOllamaをインストールして実行していますか?", + "already_installed": "既に{endpointType}をインストールして実行していますか?", "features": { "1": "高度なモデル(gpt-oss、DeepSeek、Qwen、Llama)を実行する", "2": "完全な制御でモデルをカスタマイズし切り替えられます", "3": "あなたのデータはデバイス上でプライベートに保たれます" }, "follow_our_tutorial": "インストールガイドを確認する", - "get_ollama": "Ollamaを入手する", - "install_desc": "Ollamaをインストールして、ローカルAI環境を構築しましょう。", + "install_desc": "AIをローカルで実行するにはどうしたいですか?", "need_help": "お困りですか?", - "ollama_desc": "Ollamaで完全なローカルAIのパワーを活用しましょう。", "setup": "セットアップ", "step1": "ステップ 1", "download_model_to_begin": "モデルをダウンロードして開始してください。", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 どれを選べばよいかわからない?", "learn_about_models": "モデルについて学ぶ", "looking_for_more_options": "🔍 より多くのオプションをお探しですか?", - "browse_more_models": "他のモデルを見る" + "browse_more_models": "他のモデルを見る", + "get_endpoint": "{endpointType}を取得", + "lm_studio_select_desc": "使いやすいデスクトップアプリ", + "ollama_select_desc": "プロフェッショナル開発者ツール", + "unlock_full_power_with": "{endpointType}でローカルAIの全機能を解放しましょう。" }, "webllm_tutorial": { "desc": 
"ブラウザで直接軽量モデル(Qwen 0.6b)を使用できます。セットアップ不要、待ち時間なし。", @@ -132,7 +134,8 @@ "title": "👋 **NativeMind** へようこそ", "body": "NativeMindは、プライバシー重視のAIブラウザ拡張機能で、オンデバイス言語モデルを活用したチャット、検索、翻訳機能を提供します。\n\nNativeMindでできることは次のとおりです:\n\n- 複数のタブでチャットし、異なるページを管理できます。\n- チャット内で直接ウェブ検索を行い、より多くのコンテキストを得られます。\n- 右クリックでページの任意の部分を即座に翻訳できます。\n- 設定からいつでもモデルの切り替えやダウンロードが可能です。\n\n下記のクイックアクションを試してみましょう。" }, - "ollama_is_running": "Ollamaが実行中です" + "ollama_is_running": "Ollamaが実行中です", + "backend_is_running": "{endpointType}が実行中です" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "モデル", "discover_more": "より多くのモデルを発見する", "no_model": "⚠️ モデルなし", - "add_model_to_start": "モデルを追加して開始" + "add_model_to_start": "モデルを追加して開始", + "ollama_models": "Ollama モデル ({count})", + "lmstudio_models": "LM Studio モデル ({count})" }, "ollama": { "already_installed": "Ollamaをすでにインストールして実行していますか?", @@ -182,19 +187,15 @@ "unload": "アンロード", "expires_in": "{duration}で期限切れ", "running": "実行中", - "running_models": "実行中のモデル" + "running_models": "実行中のモデル", + "model_management": "モデル管理", + "downloaded_models": "ダウンロード済みモデル" }, "prompts": { "chat_system_prompt": "チャットシステムプロンプト", "title": "プロンプト", "translation_system_prompt": "翻訳システムプロンプト" }, - "provider": { - "title": "接続" - }, - "provider_model": { - "title": "Ollamaを設定する" - }, "quick_actions": { "description": "新規チャットや右クリックメニューから、お気に入りのプロンプトをより速く実行できるようクイックアクションを設定します。", "edit": { @@ -236,8 +237,11 @@ "quant": "{level}クオント", "thinking": "思考", "no_active_models": "Ollamaに接続されていますが、アクティブなモデルがありません", - "not_connected_to_ollama": "Ollamaに接続されていません" - } + "not_connected_to_ollama": "Ollamaに接続されていません", + "lm_studio_no_active_models": "LM Studio に接続されましたが、アクティブなモデルがありません", + "not_connected_to_lm_studio": "Ollamaに接続されていません" + }, + "delete_model_confirm": "モデル「{model}」を削除してもよろしいですか?" 
}, "title": "設定", "model_downloader": { @@ -248,7 +252,8 @@ "download": "ダウンロード", "retry": "リトライ", "unable_to_download": "ダウンロードを開始できません", - "could_not_connect_ollama": "Ollamaに接続できませんでした。Ollamaが実行されていることを確認して、もう一度お試しください。" + "could_not_connect_ollama": "Ollamaに接続できませんでした。Ollamaが実行されていることを確認して、もう一度お試しください。", + "could_not_connect": "{endpointType}に接続できませんでした。{endpointType}が実行されていることを確認して、もう一度お試しください。" }, "webllm_downloader": { "description": "ローカルモードを使用するには、{model}モデル({size})をダウンロードする必要があります。今すぐダウンロードしますか?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "GitHubでスター", "goto_website": "ウェブサイトに移動" - } + }, + "providers": { + "lm_studio": { + "title": "LM Studioを設定する", + "already_installed": "LM Studioはすでにインストールして実行していますか?", + "model_management_desc": "モデルのダウンロード、読み込み、削除はLM Studio デスクトップアプリで行う必要があります。これは現在の実行状態のみを表示します。" + }, + "ollama": { + "title": "Ollamaを設定する" + } + }, + "get_lm_studio": "LM Studioを入手", + "delete": "削除", + "lm_studio": { + "server_address": "サーバーアドレス", + "server_address_desc": "LM Studio サーバー API エンドポイント" + }, + "webllm_desc_lm_studio": "WebLLMを使用中:クイック試用のためのqwen3:0.6b。フルモデルサポートとより良いパフォーマンスのために、LM Studioをインストールしてください。" }, "context_menu": { "quick_actions": { @@ -336,11 +358,11 @@ } }, "errors": { - "model_not_found": "おっと!何か問題が発生しました。設定でOllama接続を確認し、再度お試しください。", + "model_not_found": "おっと!何かが間違いました。設定で{endpointType}接続を確認して、もう一度お試しください。", "timeout_error": "操作がタイムアウトしました:{message}", "unknown_error": "予期しないエラーが発生しました:{message}", - "model_request_error": "おっと!何か問題が発生しました。設定でOllama接続を確認し、再度お試しください。", - "model_request_timeout": "リクエストがタイムアウトしました。Ollama接続を確認するか、長いコンテキストが応答時間に影響を与える可能性があるため、新しいセッションの開始をご検討ください。", + "model_request_error": "おっと!何かが間違いました。設定で{endpointType}接続を確認して、もう一度お試しください。", + "model_request_timeout": "リクエストタイムアウトです。{endpointType}接続を確認するか、長いコンテキストが応答時間に影響する可能性があるため、新しいセッションを開始することを検討してください。", "webllm_not_supported": "WebLLMはお使いのデバイスではサポートされていません。", "max_characters_error": "最大 {count} 文字まで入力可能です" }, diff --git a/locales/ko.json 
b/locales/ko.json index 432f3040..3b5e997d 100644 --- a/locales/ko.json +++ b/locales/ko.json @@ -107,10 +107,8 @@ "3": "사용자의 데이터는 기기에서 안전하게 유지됩니다" }, "follow_our_tutorial": "설치 가이드를 확인하세요", - "get_ollama": "Ollama 얻기", "install_desc": "Ollama를 설치하여 로컬 AI를 설정하세요.", "need_help": "도움이 필요하세요?", - "ollama_desc": "Ollama로 로컬 AI의 모든 기능을 활용하세요.", "setup": "설정", "step1": "1단계", "download_model_to_begin": "시작하려면 모델을 다운로드하세요.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 어떤 것을 선택해야 할지 확실하지 않나요?", "learn_about_models": "모델에 대해 알아보기", "looking_for_more_options": "🔍 더 많은 옵션을 찾고 계신가요?", - "browse_more_models": "더 많은 모델 둘러보기" + "browse_more_models": "더 많은 모델 둘러보기", + "get_endpoint": "{endpointType} 가져오기", + "ollama_select_desc": "전문 개발자 도구", + "lm_studio_select_desc": "사용하기 쉬운 데스크톱 앱", + "unlock_full_power_with": "{endpointType}로 완전한 로컬 AI 성능을 잠금 해제하세요." }, "webllm_tutorial": { "desc": "브라우저에서 바로 경량 모델(Qwen 0.6B)을 사용해 보세요. 설정 없이 바로 시작할 수 있습니다.", @@ -132,7 +134,8 @@ "title": "👋 **NativeMind**에 오신 것을 환영합니다", "body": "NativeMind는 개인 정보 보호에 중점을 둔 AI 브라우저 확장 프로그램으로, 온디바이스 언어 모델을 통해 채팅, 검색 및 번역을 지원합니다.\n\n\nNativeMind로 할 수 있는 일은 다음과 같습니다.\n\n\n\n- 여러 탭에서 채팅하여 다양한 페이지의 내용을 파악할 수 있습니다.\n\n- 채팅 내에서 직접 웹을 검색하여 더 많은 정보를 얻을 수 있습니다.\n\n- 마우스 오른쪽 버튼을 클릭하여 페이지의 일부를 즉시 번역할 수 있습니다.\n\n- 설정에서 언제든지 모델을 변경하거나 새 모델을 다운로드할 수 있습니다.\n\n\n아래의 빠른 액션으로 지금 바로 시작해 보세요." 
}, - "ollama_is_running": "Ollama가 실행 중입니다" + "ollama_is_running": "Ollama가 실행 중입니다", + "backend_is_running": "{endpointType}이(가) 실행 중입니다" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "모델", "discover_more": "더 많은 모델 살펴보기", "no_model": "⚠️ 모델 없음", - "add_model_to_start": "모델을 추가하여 시작하세요" + "add_model_to_start": "모델을 추가하여 시작하세요", + "lmstudio_models": "LM Studio 모델 ({count})", + "ollama_models": "Ollama 모델 ({count})" }, "ollama": { "already_installed": "이미 Ollama를 설치하고 실행 중인가요?", @@ -182,19 +187,15 @@ "unload": "내리다", "expires_in": "{duration} 후 만료", "running": "실행 중", - "running_models": "실행 중인 모델" + "running_models": "실행 중인 모델", + "downloaded_models": "다운로드된 모델", + "model_management": "모델 관리" }, "prompts": { "chat_system_prompt": "채팅 시스템 프롬프트", "title": "프롬프트", "translation_system_prompt": "번역 시스템 프롬프트" }, - "provider": { - "title": "연결" - }, - "provider_model": { - "title": "Ollama 구성" - }, "quick_actions": { "description": "새 채팅이나 컨텍스트 메뉴에서 자주 사용하는 프롬프트를 더 빠르게 실행할 수 있도록 빠른 액션을 설정하세요.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} 퀀트", "thinking": "사고", "no_active_models": "Ollama가 연결되었지만 활성화된 모델이 없습니다", - "not_connected_to_ollama": "Ollama에 연결되지 않음" - } + "not_connected_to_ollama": "Ollama에 연결되지 않음", + "not_connected_to_lm_studio": "Ollama에 연결되지 않음", + "lm_studio_no_active_models": "LM Studio가 연결되었지만 활성화된 모델이 없습니다" + }, + "delete_model_confirm": "모델 \"{model}\"을(를) 삭제하시겠습니까?" }, "title": "설정", "model_downloader": { @@ -248,7 +252,8 @@ "download": "다운로드", "retry": "다시 시도", "unable_to_download": "다운로드를 시작할 수 없음", - "could_not_connect_ollama": "Ollama에 연결할 수 없습니다. Ollama가 실행 중인지 확인하고 다시 시도해 주세요." + "could_not_connect_ollama": "Ollama에 연결할 수 없습니다. Ollama가 실행 중인지 확인하고 다시 시도해 주세요.", + "could_not_connect": "{endpointType}에 연결할 수 없습니다. {endpointType}이 실행 중인지 확인하고 다시 시도해 주세요." }, "webllm_downloader": { "description": "로컬 모드를 사용하려면 {model} 모델({size})을 다운로드해야 합니다. 지금 다운로드하시겠습니까?" 
@@ -317,7 +322,24 @@ "header": { "starts_on_github": "GitHub에서 별점 주기", "goto_website": "웹사이트로 이동" - } + }, + "providers": { + "lm_studio": { + "title": "LM Studio 구성", + "already_installed": "이미 LM Studio를 설치하고 실행했나요?", + "model_management_desc": "모델 다운로드, 로딩, 삭제는 LM Studio 데스크톱 앱에서 수행해야 합니다. 여기서는 현재 실행 상태만 표시됩니다." + }, + "ollama": { + "title": "Ollama 구성" + } + }, + "delete": "삭제", + "get_lm_studio": "LM Studio 다운로드", + "lm_studio": { + "server_address": "서버 주소", + "server_address_desc": "LM Studio 서버 API 엔드포인트" + }, + "webllm_desc_lm_studio": "지금 빠른 체험을 위해 WebLLM: qwen3:0.6b를 사용하고 있습니다. 전체 모델 지원과 더 나은 성능을 위해서는 LM Studio를 설치해 주세요." }, "context_menu": { "quick_actions": { diff --git a/locales/pt.json b/locales/pt.json index 45e90f10..dd4e4e95 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -100,17 +100,15 @@ "title": "AI no dispositivo, construído apenas para você" }, "guide": { - "already_installed": "Já instalou e está executando o Ollama?", + "already_installed": "Já instalou e executou {endpointType}?", "features": { "1": "Execute modelos avançados como gpt-oss, DeepSeek, Qwen, Llama", "2": "Personalize e alterne entre modelos com controle total", "3": "Seus dados permanecem privados no seu dispositivo" }, "follow_our_tutorial": "Siga nosso guia de instalação", - "get_ollama": "Obter Ollama", - "install_desc": "Instale o Ollama para configurar sua IA local.", + "install_desc": "Como você quer executar IA localmente?", "need_help": "Precisa de ajuda?", - "ollama_desc": "Desbloqueie todo o poder da IA local com Ollama.", "setup": "Configurar", "step1": "Etapa 1", "download_model_to_begin": "Baixe um modelo para começar.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Não tem certeza de qual escolher?", "learn_about_models": "Aprenda sobre modelos", "looking_for_more_options": "🔍 Procurando por mais opções?", - "browse_more_models": "Navegue por mais modelos" + "browse_more_models": "Navegue por mais modelos", + "get_endpoint": "Obter {endpointType}", + 
"lm_studio_select_desc": "Aplicativo de Desktop Fácil de Usar", + "ollama_select_desc": "Ferramenta de Desenvolvedor Profissional", + "unlock_full_power_with": "Desbloqueie todo o poder da IA local com {endpointType}." }, "webllm_tutorial": { "desc": "Use um modelo leve (QWEN 0.6B) diretamente no seu navegador - sem configuração, sem espera.", @@ -132,7 +134,8 @@ "title": "👋 Bem-vindo ao **NativeMind**", "body": "O NativeMind é uma extensão de navegador com foco em privacidade que ajuda você a conversar, pesquisar e traduzir - tudo alimentado por modelos de linguagem no dispositivo.\n\n\nAqui está o que você pode fazer com o NativeMind:\n\n\n\n- Converse através de várias abas para acompanhar diferentes páginas.\n\n- Pesquise na web diretamente no chat para obter mais contexto.\n\n- Clique com o botão direito para traduzir instantaneamente qualquer parte da página.\n\n- Alterne ou baixe modelos a qualquer momento nas configurações.\n\n\nVocê pode começar experimentando as ações rápidas abaixo." 
}, - "ollama_is_running": "Ollama está executando" + "ollama_is_running": "Ollama está executando", + "backend_is_running": "{endpointType} está em execução" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Modelos", "discover_more": "Descubra mais modelos", "no_model": "⚠️ Nenhum modelo", - "add_model_to_start": "Adicionar um modelo para começar" + "add_model_to_start": "Adicionar um modelo para começar", + "ollama_models": "Modelos Ollama ({count})", + "lmstudio_models": "LM Studio Modelos ({count})" }, "ollama": { "already_installed": "Já instalou e está executando o Ollama?", @@ -182,19 +187,15 @@ "unload": "Descarregar", "expires_in": "Expira em {duration}", "running": "Executando", - "running_models": "Modelos em Execução" + "running_models": "Modelos em Execução", + "model_management": "Gestão de Modelos", + "downloaded_models": "Modelos Baixados" }, "prompts": { "chat_system_prompt": "Prompt de sistema do chat", "title": "Prompts", "translation_system_prompt": "Prompt do sistema de tradução" }, - "provider": { - "title": "Conexão" - }, - "provider_model": { - "title": "Configurar Ollama" - }, "quick_actions": { "description": "Configure ações rápidas para executar seus prompts favoritos mais rapidamente - em um novo chat ou no menu de contexto.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} quant", "thinking": "Pensando", "no_active_models": "Ollama conectado, mas nenhum modelo ativo", - "not_connected_to_ollama": "Não conectado ao Ollama" - } + "not_connected_to_ollama": "Não conectado ao Ollama", + "lm_studio_no_active_models": "LM Studio conectado, mas nenhum modelo ativo", + "not_connected_to_lm_studio": "Não conectado ao Ollama" + }, + "delete_model_confirm": "Tem certeza de que deseja excluir o modelo \"{model}\"?" 
}, "title": "Configurações", "model_downloader": { @@ -248,7 +252,8 @@ "download": "Baixar", "retry": "Tentar novamente", "unable_to_download": "Não foi possível iniciar o download", - "could_not_connect_ollama": "Não foi possível conectar ao Ollama. Por favor, certifique-se de que o Ollama está em execução e tente novamente." + "could_not_connect_ollama": "Não foi possível conectar ao Ollama. Por favor, certifique-se de que o Ollama está em execução e tente novamente.", + "could_not_connect": "Não foi possível conectar ao {endpointType}. Certifique-se de que o {endpointType} está em execução e tente novamente." }, "webllm_downloader": { "description": "Para usar o modo Local, você precisa baixar o modelo {model} ({size}). Gostaria de baixá-lo agora?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Estrelas no GitHub", "goto_website": "Ir para o Website" - } + }, + "providers": { + "lm_studio": { + "title": "Configurar LM Studio", + "already_installed": "Já instalou e executou o LM Studio?", + "model_management_desc": "O download, carregamento e exclusão do modelo devem ser feitos no aplicativo desktop do LM Studio. Isso mostra apenas o status de execução atual." + }, + "ollama": { + "title": "Configurar Ollama" + } + }, + "get_lm_studio": "Obter LM Studio", + "delete": "Excluir", + "lm_studio": { + "server_address": "Endereço do Servidor", + "server_address_desc": "endpoint da API do servidor LM Studio" + }, + "webllm_desc_lm_studio": "Você está agora usando WebLLM: qwen3:0.6b para teste rápido. Para suporte completo de modelos e melhor desempenho, por favor instale o LM Studio." }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "Ops! Algo deu errado. Por favor, verifique sua conexão com o Ollama nas configurações e tente novamente.", - "model_request_error": "Ops! Algo deu errado. 
Por favor, verifique sua conexão com o Ollama nas configurações e tente novamente.", - "model_request_timeout": "Tempo limite da solicitação esgotado. Verifique sua conexão com o Ollama ou considere iniciar uma nova sessão, pois contextos longos podem afetar os tempos de resposta.", + "model_not_found": "Ops! Algo deu errado. Por favor, verifique sua conexão {endpointType} nas configurações e tente novamente.", + "model_request_error": "Ops! Algo deu errado. Por favor, verifique sua conexão {endpointType} nas configurações e tente novamente.", + "model_request_timeout": "Tempo limite da solicitação esgotado, verifique sua conexão {endpointType} ou considere iniciar uma nova sessão, pois contextos longos podem afetar os tempos de resposta.", "timeout_error": "Operação expirou: {message}", "unknown_error": "Ocorreu um erro inesperado: {message}", "webllm_not_supported": "WebLLM não é suportado no seu dispositivo.", diff --git a/locales/ru.json b/locales/ru.json index 7659bae8..c5dd560a 100644 --- a/locales/ru.json +++ b/locales/ru.json @@ -100,17 +100,15 @@ "title": "ИИ на устройстве, созданный специально для вас" }, "guide": { - "already_installed": "Уже установлен и запущен Ollama?", + "already_installed": "Уже установлен и запущен {endpointType}?", "features": { "1": "Запускайте продвинутые модели, такие как gpt-oss, DeepSeek, Qwen, Llama", "2": "Настраивайте и переключайте модели с полным контролем", "3": "Ваши данные остаются приватными на вашем устройстве" }, "follow_our_tutorial": "Следуйте нашему руководству по установке", - "get_ollama": "Получить Ollama", - "install_desc": "Установите Ollama, чтобы настроить локальный ИИ.", + "install_desc": "Как вы хотите запустить ИИ локально?", "need_help": "Нужна помощь?", - "ollama_desc": "Раскройте полную мощь локального ИИ с Ollama.", "setup": "Настройка", "step1": "Шаг 1", "download_model_to_begin": "Загрузите модель, чтобы начать.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Не уверены, какую выбрать?", 
"learn_about_models": "Узнать о моделях", "looking_for_more_options": "🔍 Ищете больше вариантов?", - "browse_more_models": "Просмотреть больше моделей" + "browse_more_models": "Просмотреть больше моделей", + "get_endpoint": "Получить {endpointType}", + "lm_studio_select_desc": "Простое в использовании настольное приложение", + "ollama_select_desc": "Профессиональный инструмент разработчика", + "unlock_full_power_with": "Разблокируйте полную мощь локального ИИ с {endpointType}." }, "webllm_tutorial": { "desc": "Используйте легкую модель (QWEN 0,6B) прямо в вашем браузере - без настройки, без ожидания.", @@ -132,7 +134,8 @@ "title": "👋 Добро пожаловать в **NativeMind**", "body": "NativeMind — это расширение браузера, ориентированное на приватность, которое помогает вам общаться, искать и переводить с помощью языковых моделей, работающих на вашем устройстве.\n\n\nВот что вы можете делать с NativeMind:\n\n\n\n- Общаться через несколько вкладок, чтобы отслеживать разные страницы.\n\n- Искать в интернете прямо в чате для получения дополнительного контекста.\n\n- Щёлкнуть правой кнопкой мыши, чтобы мгновенно перевести любую часть страницы.\n\n- Переключать или загружать модели в любое время в настройках.\n\n\nНачните с использования быстрых действий ниже." 
}, - "ollama_is_running": "Ollama запущена" + "ollama_is_running": "Ollama запущена", + "backend_is_running": "{endpointType} запущен" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Модели", "discover_more": "Откройте для себя больше моделей", "no_model": "⚠️ Модель отсутствует", - "add_model_to_start": "Добавьте модель для начала" + "add_model_to_start": "Добавьте модель для начала", + "ollama_models": "Модели Ollama ({count})", + "lmstudio_models": "LM Studio Модели ({count})" }, "ollama": { "already_installed": "Уже установлен и запущен Ollama?", @@ -182,19 +187,15 @@ "unload": "Выгрузить", "expires_in": "Истекает через {duration}", "running": "Выполняется", - "running_models": "Работающие модели" + "running_models": "Работающие модели", + "model_management": "Управление моделями", + "downloaded_models": "Загруженные модели" }, "prompts": { "chat_system_prompt": "Системная подсказка для чата", "title": "Подсказки", "translation_system_prompt": "Системный промпт для перевода" }, - "provider": { - "title": "Соединение" - }, - "provider_model": { - "title": "Настроить Ollama" - }, "quick_actions": { "description": "Настройте быстрые действия для запуска ваших любимых подсказок из нового чата или через контекстное меню.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} квант", "thinking": "Мышление", "no_active_models": "Ollama подключен, но нет активных моделей", - "not_connected_to_ollama": "Не подключено к Ollama" - } + "not_connected_to_ollama": "Не подключено к Ollama", + "lm_studio_no_active_models": "LM Studio подключена, но нет активных моделей", + "not_connected_to_lm_studio": "Не подключен к Ollama" + }, + "delete_model_confirm": "Вы уверены, что хотите удалить модель \"{model}\"?" }, "title": "Настройки", "model_downloader": { @@ -248,7 +252,8 @@ "download": "Скачать", "retry": "Повторить", "unable_to_download": "Невозможно начать загрузку", - "could_not_connect_ollama": "Не удалось подключиться к Ollama. 
Пожалуйста, убедитесь, что Ollama запущена, и попробуйте снова." + "could_not_connect_ollama": "Не удалось подключиться к Ollama. Пожалуйста, убедитесь, что Ollama запущена, и попробуйте снова.", + "could_not_connect": "Не удалось подключиться к {endpointType}. Убедитесь, что {endpointType} запущен, и повторите попытку." }, "webllm_downloader": { "description": "Для использования локального режима необходимо загрузить модель {model} ({size}). Хотите загрузить её сейчас?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Звезды на GitHub", "goto_website": "Перейти на сайт" - } + }, + "providers": { + "lm_studio": { + "title": "Настроить LM Studio", + "already_installed": "Уже установили и запустили LM Studio?", + "model_management_desc": "Скачивание, загрузка и удаление модели должны выполняться в настольном приложении LM Studio. Здесь отображается только текущий статус работы." + }, + "ollama": { + "title": "Настроить Ollama" + } + }, + "get_lm_studio": "Получить LM Studio", + "delete": "Удалить", + "lm_studio": { + "server_address": "Адрес сервера", + "server_address_desc": "Конечная точка API сервера LM Studio" + }, + "webllm_desc_lm_studio": "Вы сейчас используете WebLLM: qwen3:0.6b для быстрого ознакомления. Для полной поддержки модели и лучшей производительности, пожалуйста, установите LM Studio." }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "Упс! Что-то пошло не так. Пожалуйста, проверьте соединение с Ollama в настройках и попробуйте ещё раз.", - "model_request_error": "Упс! Что-то пошло не так. Пожалуйста, проверьте соединение с Ollama в настройках и попробуйте ещё раз.", - "model_request_timeout": "Время ожидания запроса истекло. Проверьте соединение с Ollama или рассмотрите возможность начала нового сеанса, так как большие контексты могут влиять на время отклика.", + "model_not_found": "Упс! Что-то пошло не так.
Пожалуйста, проверьте ваше {endpointType} соединение в настройках и попробуйте снова.", + "model_request_error": "Упс! Что-то пошло не так. Пожалуйста, проверьте ваше {endpointType} соединение в настройках и попробуйте снова.", + "model_request_timeout": "Превышено время ожидания запроса, проверьте ваше {endpointType} соединение или рассмотрите возможность начать новую сессию, поскольку длинные контексты могут влиять на время отклика.", "timeout_error": "Время операции истекло: {message}", "unknown_error": "Произошла неожиданная ошибка: {message}", "webllm_not_supported": "WebLLM не поддерживается на вашем устройстве.", diff --git a/locales/th.json b/locales/th.json index 32ad5bf6..3442f27f 100644 --- a/locales/th.json +++ b/locales/th.json @@ -100,17 +100,15 @@ "title": "AI บนอุปกรณ์ สร้างขึ้นเพื่อคุณ" }, "guide": { - "already_installed": "ได้ติดตั้งและกำลังใช้งาน Ollama อยู่แล้ว?", + "already_installed": "ติดตั้งและเรียกใช้ {endpointType} แล้วหรือยัง?", "features": { "1": "เรียกใช้โมเดลขั้นสูงเช่น gpt-oss, DeepSeek, Qwen, Llama", "2": "ปรับแต่งและสลับโมเดลได้อย่างเต็มรูปแบบ", "3": "ข้อมูลของคุณปลอดภัยอยู่บนอุปกรณ์ของคุณเท่านั้น" }, "follow_our_tutorial": "ทำตามคู่มือการติดตั้งของเรา", - "get_ollama": "รับ Ollama", - "install_desc": "ติดตั้ง Ollama เพื่อตั้งค่า AI ในพื้นที่ของคุณ", + "install_desc": "คุณต้องการรัน AI ในเครื่องอย่างไร?", "need_help": "ต้องการความช่วยเหลือ?", - "ollama_desc": "ปลดล็อกพลัง AI ท้องถิ่นเต็มรูปแบบด้วย Ollama", "setup": "การตั้งค่า", "step1": "ขั้นตอนที่ 1", "download_model_to_begin": "ดาวน์โหลดโมเดลเพื่อเริ่มต้น", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 ไม่แน่ใจว่าจะเลือกอันไหน?", "learn_about_models": "เรียนรู้เกี่ยวกับโมเดล", "looking_for_more_options": "🔍 หาตัวเลือกเพิ่มเติม?", - "browse_more_models": "เรียกดูโมเดลเพิ่มเติม" + "browse_more_models": "เรียกดูโมเดลเพิ่มเติม", + "get_endpoint": "รับ {endpointType}", + "lm_studio_select_desc": "แอปเดสก์ท็อปที่ใช้งานง่าย", + "ollama_select_desc": 
"เครื่องมือสำหรับนักพัฒนาระดับมืออาชีพ", + "unlock_full_power_with": "ปลดล็อกพลังเต็มของ AI ในเครื่องด้วย {endpointType}" }, "webllm_tutorial": { "desc": "ใช้โมเดลขนาดเล็ก (QWEN 0.6B) ในเบราว์เซอร์ของคุณ - ไม่ต้องตั้งค่า ไม่ต้องรอ", @@ -132,7 +134,8 @@ "title": "👋 ยินดีต้อนรับสู่ **NativeMind**", "body": "NativeMind คือส่วนขยายเบราว์เซอร์ AI ที่เน้นความเป็นส่วนตัวรายแรกที่ช่วยให้คุณสามารถแชท ค้นหา และแปลภาษา ด้วยพลังของโมเดลภาษาที่ทำงานบนอุปกรณ์ของคุณ\n\n\nสิ่งที่คุณสามารถทำได้กับ NativeMind:\n\n\n\n- แชทข้ามหลายแท็บเพื่อติดตามเนื้อหาหลายหน้า\n\n- ค้นหาเว็บโดยตรงในการแชทเพื่อเพิ่มบริบท\n\n- คลิกขวาเพื่อแปลเนื้อหาส่วนใดก็ได้ของหน้าเว็บทันที\n\n- เปลี่ยนหรือดาวน์โหลดโมเดลใหม่ได้ตลอดเวลาในการตั้งค่า\n\n\nเริ่มต้นใช้งานได้ทันทีด้วยการลองใช้คำสั่งด่วนด้านล่าง" }, - "ollama_is_running": "Ollama กำลังทำงานอยู่" + "ollama_is_running": "Ollama กำลังทำงานอยู่", + "backend_is_running": "{endpointType} กำลังทำงาน" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "โมเดล", "discover_more": "ค้นหาโมเดลเพิ่มเติม", "no_model": "⚠️ ไม่มีโมเดล", - "add_model_to_start": "เพิ่มโมเดลเพื่อเริ่มต้น" + "add_model_to_start": "เพิ่มโมเดลเพื่อเริ่มต้น", + "ollama_models": "โมเดล Ollama ({count})", + "lmstudio_models": "LM Studio โมเดล ({count})" }, "ollama": { "already_installed": "ติดตั้งและใช้งาน Ollama อยู่แล้ว?", @@ -182,19 +187,15 @@ "unload": "ขนถ่าย", "expires_in": "หมดอายุใน {duration}", "running": "กำลังทำงาน", - "running_models": "โมเดลที่กำลังทำงาน" + "running_models": "โมเดลที่กำลังทำงาน", + "model_management": "การจัดการโมเดล", + "downloaded_models": "โมเดลที่ดาวน์โหลดแล้ว" }, "prompts": { "chat_system_prompt": "ระบบแชท", "title": "แจ้ง", "translation_system_prompt": "พรอมต์ระบบการแปล" }, - "provider": { - "title": "การเชื่อมต่อ" - }, - "provider_model": { - "title": "กำหนดค่า Ollama" - }, "quick_actions": { "description": "ตั้งค่าการกระทำที่รวดเร็วเพื่อเรียกใช้พรอมต์ที่คุณชื่นชอบเร็วขึ้น-จากการแชทใหม่หรือเมนูคลิกขวา", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} 
ควอนต์", "thinking": "กำลังคิด", "no_active_models": "Ollama เชื่อมต่อแล้ว แต่ไม่มีโมเดลที่ใช้งานอยู่", - "not_connected_to_ollama": "ไม่ได้เชื่อมต่อกับ Ollama" - } + "not_connected_to_ollama": "ไม่ได้เชื่อมต่อกับ Ollama", + "lm_studio_no_active_models": "LM Studio เชื่อมต่อแล้ว แต่ไม่มีโมเดลที่ใช้งานอยู่", + "not_connected_to_lm_studio": "ไม่ได้เชื่อมต่อกับ Ollama" + }, + "delete_model_confirm": "คุณแน่ใจหรือไม่ว่าต้องการลบโมเดล \"{model}\"?" }, "title": "การตั้งค่า", "model_downloader": { @@ -248,7 +252,8 @@ "download": "ดาวน์โหลด", "retry": "ลองใหม่", "unable_to_download": "ไม่สามารถเริ่มการดาวน์โหลดได้", - "could_not_connect_ollama": "เราไม่สามารถเชื่อมต่อกับ Ollama ได้ กรุณาตรวจสอบให้แน่ใจว่า Ollama กำลังทำงานอยู่และลองใหม่อีกครั้ง" + "could_not_connect_ollama": "เราไม่สามารถเชื่อมต่อกับ Ollama ได้ กรุณาตรวจสอบให้แน่ใจว่า Ollama กำลังทำงานอยู่และลองใหม่อีกครั้ง", + "could_not_connect": "เราไม่สามารถเชื่อมต่อกับ {endpointType} ได้ โปรดตรวจสอบให้แน่ใจว่า {endpointType} กำลังทำงานอยู่และลองใหม่อีกครั้ง" }, "webllm_downloader": { "description": "หากต้องการใช้โหมดท้องถิ่น คุณจำเป็นต้องดาวน์โหลดโมเดล {model} ({size}) คุณต้องการดาวน์โหลดตอนนี้หรือไม่?" 
@@ -317,7 +322,24 @@ "header": { "starts_on_github": "ให้ดาวบน GitHub", "goto_website": "ไปที่เว็บไซต์" - } + }, + "providers": { + "lm_studio": { + "title": "กำหนดค่า LM Studio", + "already_installed": "ติดตั้งและรัน LM Studio แล้วหรือยัง?", + "model_management_desc": "การดาวน์โหลด การโหลด และการลบโมเดลต้องทำในแอป LM Studio บนเดสก์ท็อป หน้านี้แสดงเฉพาะสถานะการทำงานปัจจุบันเท่านั้น" + }, + "ollama": { + "title": "กำหนดค่า Ollama" + } + }, + "get_lm_studio": "ดาวน์โหลด LM Studio", + "delete": "ลบ", + "lm_studio": { + "server_address": "ที่อยู่เซิร์ฟเวอร์", + "server_address_desc": "จุดปลายทาง API ของเซิร์ฟเวอร์ LM Studio" + }, + "webllm_desc_lm_studio": "คุณกำลังใช้ WebLLM: qwen3:0.6b สำหรับการทดลองใช้งานอย่างรวดเร็ว สำหรับการรองรับโมเดลแบบเต็มและประสิทธิภาพที่ดีกว่า กรุณาติดตั้ง LM Studio" }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "อุปส์! มีบางอย่างผิดพลาด โปรดตรวจสอบการเชื่อมต่อ Ollama ของคุณในการตั้งค่าและลองอีกครั้ง", - "model_request_error": "อุปส์! มีบางอย่างผิดพลาด โปรดตรวจสอบการเชื่อมต่อ Ollama ของคุณในการตั้งค่าและลองอีกครั้ง", - "model_request_timeout": "หมดเวลาการตอบสนอง โปรดตรวจสอบการเชื่อมต่อ Ollama ของคุณ หรือพิจารณาเริ่มต้นเซสชั่นใหม่ เนื่องจากบริบทที่ยาวอาจส่งผลต่อเวลาการตอบสนอง", + "model_not_found": "อุ๊ปส์! มีบางอย่างผิดพลาด กรุณาตรวจสอบการเชื่อมต่อ {endpointType} ในการตั้งค่าและลองใหม่อีกครั้ง", + "model_request_error": "อุ๊ปส์! 
มีบางอย่างผิดพลาด กรุณาตรวจสอบการเชื่อมต่อ {endpointType} ในการตั้งค่าและลองใหม่อีกครั้ง", + "model_request_timeout": "คำขอหมดเวลา กรุณาตรวจสอบการเชื่อมต่อ {endpointType} ของคุณ หรือพิจารณาเริ่มเซสชันใหม่ เนื่องจากบริบทที่ยาวอาจส่งผลต่อเวลาในการตอบสนอง", "timeout_error": "การดำเนินการหมดเวลา: {message}", "unknown_error": "เกิดข้อผิดพลาดที่ไม่คาดคิด: {message}", "webllm_not_supported": "WebLLM ไม่รองรับบนอุปกรณ์ของคุณ", diff --git a/locales/vi.json b/locales/vi.json index e256dda6..94b915c7 100644 --- a/locales/vi.json +++ b/locales/vi.json @@ -100,17 +100,15 @@ "title": "AI trên thiết bị, được xây dựng chỉ dành cho bạn" }, "guide": { - "already_installed": "Bạn đã cài đặt và chạy Ollama?", + "already_installed": "Đã cài đặt và chạy {endpointType}?", "features": { "1": "Chạy các mô hình tiên tiến như gpt-oss, DeepSeek, Qwen, Llama", "2": "Tùy chỉnh và chuyển đổi các mô hình với toàn bộ điều khiển", "3": "Dữ liệu của bạn vẫn riêng tư, trên thiết bị của bạn" }, "follow_our_tutorial": "Thực hiện theo hướng dẫn cài đặt của chúng tôi", - "get_ollama": "Nhận Ollama", - "install_desc": "Cài đặt Ollama để thiết lập AI cục bộ của bạn.", + "install_desc": "Bạn muốn chạy AI cục bộ như thế nào?", "need_help": "Cần giúp đỡ?", - "ollama_desc": "Mở khóa toàn bộ sức mạnh của AI trên thiết bị với Ollama.", "setup": "Cài đặt", "step1": "Bước 1", "download_model_to_begin": "Tải xuống một mô hình để bắt đầu.", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 Không chắc chọn cái nào?", "learn_about_models": "Tìm hiểu về các mô hình", "looking_for_more_options": "🔍 Đang tìm kiếm thêm tùy chọn?", - "browse_more_models": "Duyệt qua các mô hình khác" + "browse_more_models": "Duyệt qua các mô hình khác", + "get_endpoint": "Lấy {endpointType}", + "lm_studio_select_desc": "Ứng dụng Desktop dễ sử dụng", + "ollama_select_desc": "Công cụ Phát triển Chuyên nghiệp", + "unlock_full_power_with": "Mở khóa toàn bộ sức mạnh AI cục bộ với {endpointType}." 
}, "webllm_tutorial": { "desc": "Sử dụng mô hình nhẹ (QWEN 0,6B) ngay trong trình duyệt của bạn - không thiết lập, không chờ đợi.", @@ -132,7 +134,8 @@ "title": "👋 Chào mừng bạn đến với **NativeMind**", "body": "NativeMind là tiện ích mở rộng trình duyệt AI đầu tiên riêng tư giúp bạn trò chuyện, tìm kiếm và dịch - tất cả được cung cấp bởi các mô hình ngôn ngữ trên thiết bị.\n\nĐây là những gì bạn có thể làm với NativeMind:\n\n- Trò chuyện qua nhiều tab để theo dõi các trang khác nhau.\n- Tìm kiếm web trực tiếp trong trò chuyện để biết thêm bối cảnh.\n- Nhấp chuột phải để dịch bất kỳ phần nào của trang ngay lập tức.\n- Chuyển đổi hoặc tải xuống mô hình bất cứ lúc nào trong cài đặt.\n\nBạn có thể bắt đầu bằng cách thử các thao tác nhanh dưới đây." }, - "ollama_is_running": "Ollama đang chạy" + "ollama_is_running": "Ollama đang chạy", + "backend_is_running": "{endpointType} đang chạy" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "Mô hình", "discover_more": "Khám phá thêm các mô hình", "no_model": "⚠️ Không có mô hình", - "add_model_to_start": "Thêm một mô hình để bắt đầu" + "add_model_to_start": "Thêm một mô hình để bắt đầu", + "ollama_models": "Các Mô hình Ollama ({count})", + "lmstudio_models": "Các Mô hình LM Studio ({count})" }, "ollama": { "already_installed": "Bạn đã cài đặt và chạy Ollama?", @@ -182,19 +187,15 @@ "unload": "Dỡ tải", "expires_in": "Hết hạn trong {duration}", "running": "Chạy", - "running_models": "Mô hình đang chạy" + "running_models": "Mô hình đang chạy", + "model_management": "Quản lý Mô hình", + "downloaded_models": "Các Mô hình Đã tải xuống" }, "prompts": { "chat_system_prompt": "Nhắc hệ thống trò chuyện", "title": "Nhắc nhở", "translation_system_prompt": "Prompt hệ thống dịch thuật" }, - "provider": { - "title": "Kết nối" - }, - "provider_model": { - "title": "Cấu hình Ollama" - }, "quick_actions": { "description": "Thiết lập các thao tác nhanh để chạy lời nhắc yêu thích của bạn nhanh hơn - từ trò 
chuyện mới hoặc menu chuột phải.", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} định lượng", "thinking": "Suy nghĩ", "no_active_models": "Ollama đã kết nối, nhưng không có mô hình nào đang hoạt động", - "not_connected_to_ollama": "Không kết nối với Ollama" - } + "not_connected_to_ollama": "Không kết nối với Ollama", + "lm_studio_no_active_models": "LM Studio đã kết nối, nhưng không có mô hình nào đang hoạt động", + "not_connected_to_lm_studio": "Không kết nối với LM Studio" + }, + "delete_model_confirm": "Bạn có chắc chắn muốn xóa mô hình \"{model}\" không?" }, "title": "Cài đặt", "model_downloader": { @@ -248,7 +252,8 @@ "download": "Tải xuống", "retry": "Thử lại", "unable_to_download": "Không thể bắt đầu tải xuống", - "could_not_connect_ollama": "Chúng tôi không thể kết nối với Ollama. Vui lòng đảm bảo Ollama đang chạy và thử lại." + "could_not_connect_ollama": "Chúng tôi không thể kết nối với Ollama. Vui lòng đảm bảo Ollama đang chạy và thử lại.", + "could_not_connect": "Chúng tôi không thể kết nối tới {endpointType}. Vui lòng đảm bảo {endpointType} đang chạy và thử lại." }, "webllm_downloader": { "description": "Để sử dụng chế độ Cục bộ, bạn cần tải xuống mô hình {model} ({size}). Bạn có muốn tải xuống ngay bây giờ không?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Sao trên GitHub", "goto_website": "Đi đến Trang web" - } + }, + "providers": { + "lm_studio": { + "title": "Cấu hình LM Studio", + "already_installed": "Đã cài đặt và chạy LM Studio chưa?", + "model_management_desc": "Việc tải xuống, tải và xóa mô hình phải được thực hiện trong ứng dụng desktop LM Studio. Đây chỉ hiển thị trạng thái đang chạy hiện tại." + }, + "ollama": { + "title": "Cấu hình Ollama" + } + }, + "get_lm_studio": "Tải LM Studio", + "delete": "Xóa", + "lm_studio": { + "server_address": "Địa chỉ máy chủ", + "server_address_desc": "Điểm cuối API máy chủ LM Studio" + }, + "webllm_desc_lm_studio": "Bạn hiện đang sử dụng WebLLM: qwen3:0.6b để dùng thử nhanh. 
Để có hỗ trợ mô hình đầy đủ và hiệu suất tốt hơn, vui lòng cài đặt LM Studio." }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "Ối! Đã xảy ra lỗi. Vui lòng kiểm tra kết nối Ollama của bạn trong cài đặt và thử lại.", - "model_request_error": "Ối! Đã xảy ra lỗi. Vui lòng kiểm tra kết nối Ollama của bạn trong cài đặt và thử lại.", - "model_request_timeout": "Yêu cầu đã hết thời gian. Vui lòng kiểm tra kết nối Ollama của bạn hoặc xem xét bắt đầu một phiên mới, vì bối cảnh dài có thể ảnh hưởng đến thời gian phản hồi.", + "model_not_found": "Rất tiếc! Đã xảy ra lỗi. Vui lòng kiểm tra kết nối {endpointType} trong cài đặt và thử lại.", + "model_request_error": "Rất tiếc! Đã xảy ra lỗi. Vui lòng kiểm tra kết nối {endpointType} trong cài đặt và thử lại.", + "model_request_timeout": "Hết thời gian chờ yêu cầu, vui lòng kiểm tra kết nối {endpointType} của bạn hoặc cân nhắc bắt đầu phiên mới vì ngữ cảnh dài có thể ảnh hưởng đến thời gian phản hồi.", "timeout_error": "Thao tác đã hết thời gian: {message}", "unknown_error": "Đã xảy ra lỗi không xác định: {message}", "webllm_not_supported": "WebLLM không được hỗ trợ trên thiết bị của bạn.", diff --git a/locales/zh-CN.json b/locales/zh-CN.json index 699111a6..4dd460cd 100644 --- a/locales/zh-CN.json +++ b/locales/zh-CN.json @@ -37,7 +37,9 @@ "unload": "卸载", "expires_in": "在 {duration} 后过期", "running": "运行中", - "running_models": "运行中的模型" + "running_models": "运行中的模型", + "model_management": "模型管理", + "downloaded_models": "已下载的模型" }, "choose_model": "选择模型", "download": "下载", @@ -50,7 +52,9 @@ "title": "模型", "discover_more": "发现更多模型", "no_model": "⚠️ 无模型", - "add_model_to_start": "添加模型以开始" + "add_model_to_start": "添加模型以开始", + "ollama_models": "Ollama 模型 ({count})", + "lmstudio_models": "LM Studio 模型 ({count})" }, "quick_actions": { "title": "自定义快捷操作", @@ -70,12 +74,6 @@ "join_waitlist_link": "加入等待名单", "discord": "Discord" }, - "provider": { - "title": "连接" - }, - "provider_model": { - 
"title": "配置 Ollama" - }, "cache": { "title": "翻译缓存", "enable": "启用缓存", @@ -101,8 +99,11 @@ "quant": "{level} 量化", "thinking": "推理", "no_active_models": "Ollama 已连接,但没有活跃的模型", - "not_connected_to_ollama": "未连接到 Ollama" - } + "not_connected_to_ollama": "未连接到 Ollama", + "lm_studio_no_active_models": "LM Studio 已连接,但没有活跃的模型", + "not_connected_to_lm_studio": "未连接到 LM Studio" + }, + "delete_model_confirm": "您确定要删除模型\"{model}\"吗?" }, "title": "设置", "model_downloader": { @@ -113,7 +114,8 @@ "download": "下载", "retry": "重试", "unable_to_download": "无法开始下载", - "could_not_connect_ollama": "我们无法连接到 Ollama。请确保 Ollama 正在运行,然后重试。" + "could_not_connect_ollama": "我们无法连接到 Ollama。请确保 Ollama 正在运行,然后重试。", + "could_not_connect": "我们无法连接到 {endpointType}。请确保 {endpointType} 正在运行,然后重试。" }, "webllm_downloader": { "description": "要使用本地模式,您需要下载 {model} 模型({size})。您现在想要下载吗?" @@ -182,7 +184,24 @@ "header": { "starts_on_github": "Stars on GitHub", "goto_website": "前往网站" - } + }, + "providers": { + "lm_studio": { + "title": "配置 LM Studio", + "already_installed": "已经安装并运行了 LM Studio?", + "model_management_desc": "模型下载、加载和删除必须在 LM Studio 桌面应用程序中完成。这里只显示当前运行状态。" + }, + "ollama": { + "title": "配置 Ollama" + } + }, + "get_lm_studio": "获取 LM Studio", + "delete": "删除", + "lm_studio": { + "server_address": "服务器地址", + "server_address_desc": "LM Studio 服务器 API 端点" + }, + "webllm_desc_lm_studio": "您现在正在使用 WebLLM: qwen3:0.6b 进行快速试用。如需完整模型支持和更好的性能,请安装 LM Studio。" }, "chat": { "messages": { @@ -285,17 +304,15 @@ "title": "为您打造的本地AI助手" }, "guide": { - "already_installed": "已安装并运行Ollama?", + "already_installed": "已安装并运行 {endpointType}?", "features": { "1": "运行高级模型,如 gpt-oss、DeepSeek、Qwen、Llama", "2": "完全掌控模型的自定义和切换", "3": "您的数据始终保持在本地设备上" }, "follow_our_tutorial": "查看我们的安装指南", - "get_ollama": "获取Ollama", - "install_desc": "安装Ollama以设置您的本地AI助手。", + "install_desc": "您想如何在本地运行AI?", "need_help": "需要帮助?", - "ollama_desc": "借助Ollama释放AI的全部潜力。", "setup": "开始设置", "step1": "步骤1", "download_model_to_begin": 
"下载模型以开始。", @@ -305,7 +322,11 @@ "no_sure_which_one": "🤔 不确定选择哪一个?", "learn_about_models": "了解模型", "looking_for_more_options": "🔍 寻找更多选项?", - "browse_more_models": "浏览更多模型" + "browse_more_models": "浏览更多模型", + "get_endpoint": "获取 {endpointType}", + "lm_studio_select_desc": "易于使用的桌面应用程序", + "ollama_select_desc": "专业开发者工具", + "unlock_full_power_with": "使用 {endpointType} 释放本地 AI 的全部潜力。" }, "webllm_tutorial": { "desc": "在浏览器中使用轻量级模型(Qwen 0.6B)- 无需设置,即刻体验。", @@ -317,7 +338,8 @@ "body": "NativeMind是一款注重隐私的AI浏览器扩展,可帮助您聊天、搜索和翻译 — 所有功能均由本地设备上的语言模型提供支持。\n\n通过NativeMind,您可以:\n- 在多个标签页之间聊天,轻松跟踪不同页面内容。\n- 直接在聊天中搜索网页获取更多信息。\n- 右键点击即可立即翻译页面任何部分。\n- 在设置中随时切换或下载新模型。\n\n您可以从下方的快捷操作开始体验。", "title": "👋欢迎使用 **NativeMind**" }, - "ollama_is_running": "Ollama 正在运行" + "ollama_is_running": "Ollama 正在运行", + "backend_is_running": "{endpointType} 正在运行" }, "context_menu": { "quick_actions": { @@ -336,11 +358,11 @@ } }, "errors": { - "model_not_found": "哎呀!\n出了点问题。\n请在设置中检查您的Ollama连接,然后重试。", - "model_request_error": "哎呀!\n出了点问题。\n请在设置中检查您的Ollama连接,然后重试。", + "model_not_found": "哎呀!出了点问题。请在设置中检查您的 {endpointType} 连接并重试。", + "model_request_error": "哎呀!出了点问题。请在设置中检查您的 {endpointType} 连接并重试。", "timeout_error": "操作超时:{message}", "unknown_error": "发生意外错误:{message}", - "model_request_timeout": "请求超时,请检查您的Ollama连接或考虑启动新会话,因为长上下文可能会影响响应时间。", + "model_request_timeout": "请求超时,请检查您的{endpointType}连接或考虑开始新会话,因为长上下文可能会影响响应时间。", "webllm_not_supported": "WebLLM 在您的设备上不受支持。", "max_characters_error": "最多允许 {count} 个字符" }, diff --git a/locales/zh-TW.json b/locales/zh-TW.json index 34b2268a..ed446a26 100644 --- a/locales/zh-TW.json +++ b/locales/zh-TW.json @@ -100,17 +100,15 @@ "title": "裝置AI,專為您打造" }, "guide": { - "already_installed": "已經安裝並執行Ollama?", + "already_installed": "已安裝並執行 {endpointType}?", "features": { "1": "執行進階模型,如 gpt-oss、DeepSeek、Qwen、Llama", "2": "自訂和切換模型完全控制", "3": "您的資料在裝置上保持私密" }, "follow_our_tutorial": "參照我們的安裝指南", - "get_ollama": "獲取Ollama", - "install_desc": "安裝Ollama來設置您的本地AI。", + 
"install_desc": "您想如何在本地運行 AI?", "need_help": "需要幫助嗎?", - "ollama_desc": "用Ollama解鎖AI的全部力量。", "setup": "設置", "step1": "步驟1", "download_model_to_begin": "下載模型以開始。", @@ -120,7 +118,11 @@ "no_sure_which_one": "🤔 不確定要選擇哪一個?", "learn_about_models": "了解模型", "looking_for_more_options": "🔍 尋找更多選項?", - "browse_more_models": "瀏覽更多模型" + "browse_more_models": "瀏覽更多模型", + "get_endpoint": "取得 {endpointType}", + "lm_studio_select_desc": "易於使用的桌面應用程式", + "ollama_select_desc": "專業開發者工具", + "unlock_full_power_with": "使用 {endpointType} 解鎖完整的本地 AI 功能。" }, "webllm_tutorial": { "desc": "在瀏覽器中使用輕巧的模型(QWEN 0.6B) - 無需設置,無需等待。", @@ -132,7 +134,8 @@ "title": "👋歡迎來到 **NativeMind**", "body": "NativeMind是一個隱私優先的AI瀏覽器擴展程式,可協助您聊天、搜尋和翻譯 - 全部由裝置語言模型提供支援。\n\n這是您可以使用NativeMind來做的:\n- 跨多個分頁聊天以跟蹤不同頁面。\n- 直接在聊天中搜尋網路以獲取更多上下文。\n- 右鍵點擊​​以立即翻譯頁面的任何部分。\n- 在設置中隨時切換或下載模型。\n\n您可以從下面嘗試快捷操作開始。" }, - "ollama_is_running": "Ollama 正在運行" + "ollama_is_running": "Ollama 正在運行", + "backend_is_running": "{endpointType} 正在執行中" }, "settings": { "cache": { @@ -160,7 +163,9 @@ "title": "模型", "discover_more": "探索更多模型", "no_model": "⚠️ 沒有模型", - "add_model_to_start": "新增模型以開始" + "add_model_to_start": "新增模型以開始", + "ollama_models": "Ollama 模型 ({count})", + "lmstudio_models": "LM Studio 模型 ({count})" }, "ollama": { "already_installed": "已經安裝並執行Ollama?", @@ -182,19 +187,15 @@ "unload": "卸載", "expires_in": "於 {duration} 後到期", "running": "運行中", - "running_models": "運行中的模型" + "running_models": "運行中的模型", + "model_management": "模型管理", + "downloaded_models": "已下載的模型" }, "prompts": { "chat_system_prompt": "聊天系統提示詞", "title": "提示詞", "translation_system_prompt": "翻譯系統提示詞" }, - "provider": { - "title": "連接" - }, - "provider_model": { - "title": "配置 Ollama" - }, "quick_actions": { "description": "設置快捷操作以更快地執行您喜歡的提示詞 - 從新建聊天或右鍵選單中。", "edit": { @@ -236,8 +237,11 @@ "quant": "{level} 量化", "thinking": "推理", "no_active_models": "Ollama 已連接,但沒有啟用的模型", - "not_connected_to_ollama": "未連接到 Ollama" - } + "not_connected_to_ollama": "未連接到 Ollama", + 
"lm_studio_no_active_models": "LM Studio 已連接,但沒有啟用的模型", + "not_connected_to_lm_studio": "未連接到 LM Studio" + }, + "delete_model_confirm": "您確定要刪除模型「{model}」嗎?" }, "title": "設定", "model_downloader": { @@ -248,7 +252,8 @@ "download": "下載", "retry": "重試", "unable_to_download": "無法開始下載", - "could_not_connect_ollama": "我們無法連接到 Ollama。請確保 Ollama 正在運行,然後再試一次。" + "could_not_connect_ollama": "我們無法連接到 Ollama。請確保 Ollama 正在運行,然後再試一次。", + "could_not_connect": "我們無法連接到 {endpointType}。請確保 {endpointType} 正在運行,然後再試一次。" }, "webllm_downloader": { "description": "要使用本機模式,您需要下載 {model} 模型({size})。您現在要下載嗎?" @@ -317,7 +322,24 @@ "header": { "starts_on_github": "Stars on GitHub", "goto_website": "前往網站" - } + }, + "providers": { + "lm_studio": { + "title": "配置 LM Studio", + "already_installed": "已經安裝並執行 LM Studio 了嗎?", + "model_management_desc": "模型下載、載入和刪除必須在 LM Studio 桌面應用程式中完成。這裡只顯示目前的運行狀態。" + }, + "ollama": { + "title": "配置 Ollama" + } + }, + "get_lm_studio": "取得 LM Studio", + "delete": "刪除", + "lm_studio": { + "server_address": "伺服器位址", + "server_address_desc": "LM Studio 伺服器 API 端點" + }, + "webllm_desc_lm_studio": "您現在正在使用 WebLLM: qwen3:0.6b 進行快速試用。如需完整模型支援和更佳效能,請安裝 LM Studio。" }, "context_menu": { "quick_actions": { @@ -336,9 +358,9 @@ } }, "errors": { - "model_not_found": "哎呀!出了點問題。請在設置中檢查您的Ollama連接,然後重試。", - "model_request_error": "哎呀!出了點問題。請在設置中檢查您的Ollama連接,然後重試。", - "model_request_timeout": "請求超時,請檢查您的Ollama連接或考慮啟動新會話,因為長上下文可能會影響響應時間。", + "model_not_found": "糟糕!發生錯誤。請檢查設定中的 {endpointType} 連線並重試。", + "model_request_error": "糟糕!發生錯誤。請檢查設定中的 {endpointType} 連線並重試。", + "model_request_timeout": "請求逾時,請檢查您的 {endpointType} 連線或考慮開始新的會話,因為長上下文可能會影響回應時間。", "timeout_error": "操作計時:{message}", "unknown_error": "發生意外錯誤:{message}", "webllm_not_supported": "WebLLM 在您的裝置上不受支援。", diff --git a/package.json b/package.json index 8f1a9370..1e079bd7 100644 --- a/package.json +++ b/package.json @@ -38,9 +38,9 @@ "compile": "vue-tsc --noEmit", "test:e2e": "playwright test --config=playwright.config.ts", 
"test:unit": "vitest run", - "dev": "wxt", - "dev:edge": "wxt -b edge", - "dev:firefox": "wxt -b firefox --mv3", + "dev": "NODE_ENV=development wxt", + "dev:edge": "NODE_ENV=development wxt -b edge", + "dev:firefox": "NODE_ENV=development wxt -b firefox --mv3", "postinstall": "wxt prepare", "release:beta": "release-it --preRelease=beta --config .release-it.beta.json", "release:prod": "release-it", @@ -53,7 +53,7 @@ "prepare": "husky" }, "dependencies": { - "@ai-sdk/openai": "^1.3.22", + "@ai-sdk/openai-compatible": "^0.2.5", "@ai-sdk/provider": "^1.1.3", "@ai-sdk/provider-utils": "^2.2.8", "@huggingface/transformers": "^3.5.2", @@ -64,10 +64,12 @@ "@jimp/js-png": "^1.6.0", "@jimp/js-tiff": "^1.6.0", "@jimp/plugin-resize": "^1.6.0", + "@lmstudio/sdk": "^1.5.0", "@mlc-ai/web-llm": "^0.2.79", "@mozilla/readability": "^0.6.0", "@tailwindcss/vite": "^4.1.7", "@vueuse/core": "^13.1.0", + "add": "^2.0.6", "ai": "^4.3.13", "base64-js": "^1.5.1", "best-effort-json-parser": "^1.1.3", @@ -103,8 +105,10 @@ "nanoid": "^5.1.5", "ollama": "^0.5.15", "openai": "^5.9.2", + "openai-harmony": "^0.4.0", "partial-json": "^0.1.7", "pinia": "^3.0.2", + "pnpm": "^10.15.1", "prettier-plugin-packagejson": "^2.5.11", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 51e364e5..0eb4fd92 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -8,9 +8,9 @@ importers: .: dependencies: - '@ai-sdk/openai': - specifier: ^1.3.22 - version: 1.3.22(zod@3.24.4) + '@ai-sdk/openai-compatible': + specifier: ^0.2.5 + version: 0.2.16(zod@3.24.4) '@ai-sdk/provider': specifier: ^1.1.3 version: 1.1.3 @@ -41,6 +41,9 @@ importers: '@jimp/plugin-resize': specifier: ^1.6.0 version: 1.6.0 + '@lmstudio/sdk': + specifier: ^1.5.0 + version: 1.5.0 '@mlc-ai/web-llm': specifier: ^0.2.79 version: 0.2.79 @@ -53,6 +56,9 @@ importers: '@vueuse/core': specifier: ^13.1.0 version: 13.1.0(vue@3.5.13(typescript@5.8.3)) + add: + specifier: ^2.0.6 + version: 2.0.6 ai: specifier: 
^4.3.13 version: 4.3.13(react@19.1.0)(zod@3.24.4) @@ -158,12 +164,18 @@ importers: openai: specifier: ^5.9.2 version: 5.9.2(ws@8.18.1)(zod@3.24.4) + openai-harmony: + specifier: ^0.4.0 + version: 0.4.0(typescript@5.8.3) partial-json: specifier: ^0.1.7 version: 0.1.7 pinia: specifier: ^3.0.2 version: 3.0.2(typescript@5.8.3)(vue@3.5.13(typescript@5.8.3)) + pnpm: + specifier: ^10.15.1 + version: 10.15.1 prettier-plugin-packagejson: specifier: ^2.5.11 version: 2.5.11(prettier@3.5.3) @@ -345,8 +357,8 @@ packages: '@1natsu/wait-element@4.1.2': resolution: {integrity: sha512-qWxSJD+Q5b8bKOvESFifvfZ92DuMsY+03SBNjTO34ipJLP6mZ9yK4bQz/vlh48aEQXoJfaZBqUwKL5BdI5iiWw==} - '@ai-sdk/openai@1.3.22': - resolution: {integrity: sha512-QwA+2EkG0QyjVR+7h6FE7iOu2ivNqAVMm9UJZkVxxTk5OIq5fFJDTEI/zICEMuHImTTXR2JjsL6EirJ28Jc4cw==} + '@ai-sdk/openai-compatible@0.2.16': + resolution: {integrity: sha512-LkvfcM8slJedRyJa/MiMiaOzcMjV1zNDwzTHEGz7aAsgsQV0maLfmJRi/nuSwf5jmp0EouC+JXXDUj2l94HgQw==} engines: {node: '>=18'} peerDependencies: zod: ^3.0.0 @@ -1167,6 +1179,12 @@ packages: '@lit/reactive-element@2.1.1': resolution: {integrity: sha512-N+dm5PAYdQ8e6UlywyyrgI2t++wFGXfHx+dSJ1oBrg6FAxUj40jId++EaRm80MKX5JnlH1sBsyZ5h0bcZKemCg==} + '@lmstudio/lms-isomorphic@0.4.6': + resolution: {integrity: sha512-v0LIjXKnDe3Ff3XZO5eQjlVxTjleUHXaom14MV7QU9bvwaoo3l5p71+xJ3mmSaqZq370CQ6pTKCn1Bb7Jf+VwQ==} + + '@lmstudio/sdk@1.5.0': + resolution: {integrity: sha512-fdY12x4hb14PEjYijh7YeCqT1ZDY5Ok6VR4l4+E/dI+F6NW8oB+P83Sxed5vqE4XgTzbgyPuSR2ZbMNxxF+6jA==} + '@mediapipe/tasks-genai@0.10.14': resolution: {integrity: sha512-2XYVRJl/ylWLmBRzm7TVrl2yF8tKK2HCWypBno1KIVGPwY2i4mFlzt05itqBsTEwZbVYS/rZ15gLE9ypc6Nw3w==} @@ -1912,6 +1930,9 @@ packages: engines: {node: '>=0.4.0'} hasBin: true + add@2.0.6: + resolution: {integrity: sha512-j5QzrmsokwWWp6kUcJQySpbG+xfOBqqKnup3OIk1pz+kB/80SLorZ9V8zHFLO92Lcd+hbvq8bT+zOGoPkmBV0Q==} + adm-zip@0.5.16: resolution: {integrity: 
sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ==} engines: {node: '>=12.0'} @@ -3642,6 +3663,9 @@ packages: jsonfile@6.1.0: resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + jsonschema@1.5.0: + resolution: {integrity: sha512-K+A9hhqbn0f3pJX17Q/7H6yQfD/5OXgdrR5UE12gMXCiN9D5Xq2o5mddV2QEcX/bjla99ASsAAQUyMCCRWAEhw==} + jszip@3.10.1: resolution: {integrity: sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==} @@ -4311,6 +4335,12 @@ packages: resolution: {integrity: sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg==} engines: {node: '>=14.16'} + openai-harmony@0.4.0: + resolution: {integrity: sha512-GYpw+5ye1iIZA/hrM9coY1LnC67eK/MeTw6qN8wx5O+UKsiZhD5JV6YCDPaUEmxBXbiKyPwDF9mU2aUlrz3JEA==} + engines: {node: '>=18'} + peerDependencies: + typescript: ^5 + openai@5.9.2: resolution: {integrity: sha512-d7t/lRkwZpSwIk7GW3EHRSGAlsuoi1WL6VhCO02raEzZO2ahEVAbWn3WmOcpeh9zF6xF9weJXoVeDkWJRz+SHA==} hasBin: true @@ -4552,6 +4582,11 @@ packages: resolution: {integrity: sha512-LKWqWJRhstyYo9pGvgor/ivk2w94eSjE3RGVuzLGlr3NmD8bf7RcYGze1mNdEHRP6TRP6rMuDHk5t44hnTRyow==} engines: {node: '>=14.19.0'} + pnpm@10.15.1: + resolution: {integrity: sha512-NOU4wym1VTAUyo6PRTWZf5YYCh0PYUM5NXRJk1NQ2STiL4YUaCGRJk7DPRRirCFWGv+X9rsYBlNRwWLH6PbeZw==} + engines: {node: '>=18.12'} + hasBin: true + possible-typed-array-names@1.1.0: resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} engines: {node: '>= 0.4'} @@ -5941,7 +5976,7 @@ snapshots: defu: 6.1.4 many-keys-map: 2.0.1 - '@ai-sdk/openai@1.3.22(zod@3.24.4)': + '@ai-sdk/openai-compatible@0.2.16(zod@3.24.4)': dependencies: '@ai-sdk/provider': 1.1.3 '@ai-sdk/provider-utils': 2.2.8(zod@3.24.4) @@ -6786,6 +6821,24 @@ snapshots: dependencies: '@lit-labs/ssr-dom-shim': 1.4.0 + 
'@lmstudio/lms-isomorphic@0.4.6': + dependencies: + ws: 8.18.1 + transitivePeerDependencies: + - bufferutil + - utf-8-validate + + '@lmstudio/sdk@1.5.0': + dependencies: + '@lmstudio/lms-isomorphic': 0.4.6 + chalk: 4.1.2 + jsonschema: 1.5.0 + zod: 3.24.4 + zod-to-json-schema: 3.24.5(zod@3.24.4) + transitivePeerDependencies: + - bufferutil + - utf-8-validate + '@mediapipe/tasks-genai@0.10.14': {} '@mediapipe/tasks-text@0.10.14': {} @@ -7591,6 +7644,8 @@ snapshots: acorn@8.14.1: {} + add@2.0.6: {} + adm-zip@0.5.16: {} agent-base@7.1.3: {} @@ -9457,6 +9512,8 @@ snapshots: optionalDependencies: graceful-fs: 4.2.11 + jsonschema@1.5.0: {} + jszip@3.10.1: dependencies: lie: 3.3.0 @@ -10213,6 +10270,10 @@ snapshots: is-inside-container: 1.0.0 is-wsl: 2.2.0 + openai-harmony@0.4.0(typescript@5.8.3): + dependencies: + typescript: 5.8.3 + openai@5.9.2(ws@8.18.1)(zod@3.24.4): optionalDependencies: ws: 8.18.1 @@ -10475,6 +10536,8 @@ snapshots: pngjs@7.0.0: {} + pnpm@10.15.1: {} + possible-typed-array-names@1.1.0: {} postcss-selector-parser@6.1.2: diff --git a/types/lm-studio-models.ts b/types/lm-studio-models.ts new file mode 100644 index 00000000..b24e6c93 --- /dev/null +++ b/types/lm-studio-models.ts @@ -0,0 +1,3 @@ +import { LLMAdditionalInfo, LLMInfo, LLMInstanceAdditionalInfo, ModelInstanceInfoBase } from '@lmstudio/sdk' + +export type LMStudioModelInfo = LLMInfo & { instances?: (ModelInstanceInfoBase & LLMAdditionalInfo & LLMInstanceAdditionalInfo)[] } diff --git a/types/scroll-targets.ts b/types/scroll-targets.ts index 7a8f30a7..1d052ffb 100644 --- a/types/scroll-targets.ts +++ b/types/scroll-targets.ts @@ -1 +1 @@ -export type SettingsScrollTarget = 'quick-actions-block' | 'model-download-section' | 'server-address-section' +export type SettingsScrollTarget = 'quick-actions-block' | 'model-download-section' | 'ollama-server-address-section' | 'lm-studio-server-address-section' diff --git a/utils/constants.ts b/utils/constants.ts index 98852327..e7898b4f 100644 --- 
a/utils/constants.ts +++ b/utils/constants.ts @@ -3,6 +3,10 @@ export const OLLAMA_TUTORIAL_URL = 'https://nativemind.app/blog/tutorial/ollama- export const OLLAMA_DOWNLOAD_URL = 'https://ollama.com/download' export const OLLAMA_HOMEPAGE_URL = 'https://ollama.com' export const OLLAMA_SEARCH_URL = 'https://ollama.com/search' +export const LM_STUDIO_HOMEPAGE_URL = 'https://lmstudio.ai' +export const LM_STUDIO_DOWNLOAD_URL = 'https://lmstudio.ai/download' +export const LM_STUDIO_TUTORIAL_URL = 'https://nativemind.app/blog/tutorial/lm-studio-setup' +export const LM_STUDIO_SEARCH_URL = 'https://lmstudio.ai/models' export const NATIVEMIND_HOMEPAGE_URL = 'https://nativemind.app' export const NATIVEMIND_REPOSITORY_URL = 'https://github.com/NativeMindBrowser/NativeMindExtension' export const OLLAMA_SITE_DOWNLOAD_BUTTON_CLASS = 'nativemind-ollama-download-button' diff --git a/utils/error.ts b/utils/error.ts index 545e2758..fd5b45f6 100644 --- a/utils/error.ts +++ b/utils/error.ts @@ -1,6 +1,6 @@ -import { useGlobalI18n } from './i18n' +import { LLMEndpointType } from './llm/models' -export type ErrorCode = 'unknown' | 'requestError' | 'requestTimeout' | 'abortError' | 'timeoutError' | 'modelNotFound' | 'createTabStreamCaptureError' | 'translateError' | 'unsupportedEndpointType' | 'fetchError' | 'parseFunctionCallError' | 'aiSDKError' | 'generateObjectSchemaError' +export type ErrorCode = 'unknown' | 'requestError' | 'requestTimeout' | 'abortError' | 'timeoutError' | 'modelNotFound' | 'createTabStreamCaptureError' | 'translateError' | 'unsupportedEndpointType' | 'fetchError' | 'parseFunctionCallError' | 'aiSDKError' | 'generateObjectSchemaError' | 'lmStudioLoadModelError' | 'lmStudioDownloadModelError' // we use a base AppError class instead of extending the native Error class directly because of Firefox compatibility issues of error instance transform in postMessage export abstract class AppError { @@ -18,10 +18,6 @@ export abstract class AppError { this.message = message ?? 
'' } - async toLocaleMessage(_locale?: string): Promise { - return this.message - } - get stack(): string | undefined { return this.nativeError.stack } @@ -31,42 +27,32 @@ export class FetchError extends AppError<'fetchError'> { constructor(message: string) { super('fetchError', message) } - - async toLocaleMessage() { - return this.message - } } export class UnknownError extends AppError<'unknown'> { constructor(message: string) { super('unknown', message) } - - async toLocaleMessage() { - const { t } = await useGlobalI18n() - return t('errors.unknown_error', { message: this.message }) - } } export class ModelRequestError extends AppError<'requestError'> { - constructor(message: string) { + constructor(message: string, public endpointType?: LLMEndpointType) { super('requestError', message) } - - async toLocaleMessage() { - const { t } = await useGlobalI18n() - return t('errors.model_request_error') +} +export class LMStudioLoadModelError extends AppError<'lmStudioLoadModelError'> { + constructor(message: string) { + super('lmStudioLoadModelError', message) } } - -export class ModelNotFoundError extends AppError<'modelNotFound'> { - constructor(public model?: string) { - super('modelNotFound') +export class LMStudioDownloadModelError extends AppError<'lmStudioDownloadModelError'> { + constructor(message: string) { + super('lmStudioDownloadModelError', message) } - - async toLocaleMessage() { - const { t } = await useGlobalI18n() - return t('errors.model_not_found') +} +export class ModelNotFoundError extends AppError<'modelNotFound'> { + constructor(message: string | undefined, public endpointType?: LLMEndpointType) { + super('modelNotFound', message) } } @@ -74,51 +60,30 @@ export class ModelRequestTimeoutError extends AppError<'requestTimeout'> { constructor() { super('requestTimeout') } - - async toLocaleMessage() { - const { t } = await useGlobalI18n() - return t('errors.model_request_timeout') - } } export class UnsupportedEndpointType extends 
AppError<'unsupportedEndpointType'> { constructor(public endpointType: string) { super('unsupportedEndpointType') } - - async toLocaleMessage() { - return 'Unsupported endpoint type: ' + this.endpointType - } } export class AbortError extends AppError<'abortError'> { constructor(message: string) { super('abortError', message) } - - async toLocaleMessage() { - return 'Request aborted: ' + this.message - } } export class CreateTabStreamCaptureError extends AppError<'createTabStreamCaptureError'> { constructor(message?: string) { super('createTabStreamCaptureError', message) } - - async toLocaleMessage() { - return 'Failed to create tab stream capture' - } } export class TranslateError extends AppError<'translateError'> { constructor(message?: string) { super('translateError', message) } - - async toLocaleMessage() { - return 'Translation failed: ' + this.message - } } // common timeout error for various operations @@ -126,41 +91,24 @@ export class TimeoutError extends AppError<'timeoutError'> { constructor(message: string) { super('timeoutError', message) } - - async toLocaleMessage() { - const { t } = await useGlobalI18n() - return t('errors.timeout_error', { message: this.message }) - } } export class ParseFunctionCallError extends AppError<'parseFunctionCallError'> { constructor(message: string, public type?: 'toolNotFound' | 'invalidFormat', public toolName?: string) { super('parseFunctionCallError', message) } - - async toLocaleMessage() { - return this.message - } } export class GenerateObjectSchemaError extends AppError<'generateObjectSchemaError'> { constructor(message: string) { super('generateObjectSchemaError', message) } - - async toLocaleMessage() { - return this.message - } } export class AiSDKError extends AppError<'aiSDKError'> { constructor(message: string) { super('aiSDKError', message) } - - async toLocaleMessage() { - return this.message - } } const errors = { @@ -177,10 +125,15 @@ const errors = { parseFunctionCallError: ParseFunctionCallError, 
aiSDKError: AiSDKError, generateObjectSchemaError: GenerateObjectSchemaError, + lmStudioLoadModelError: LMStudioLoadModelError, + lmStudioDownloadModelError: LMStudioDownloadModelError, } satisfies Record> export function fromError(error: unknown): AppError { if (!AppError.isAppError(error)) { + if (error instanceof Error) { + return new UnknownError(error.message) + } return new UnknownError('An unknown error occurred') } const ctor = errors[error.code] || UnknownError diff --git a/utils/llm/lm-studio.ts b/utils/llm/lm-studio.ts new file mode 100644 index 00000000..c215473b --- /dev/null +++ b/utils/llm/lm-studio.ts @@ -0,0 +1,139 @@ +import { DownloadProgressUpdate, LLM, LLMLoadModelConfig, LMStudioClient, ModelSearchResultDownloadOption } from '@lmstudio/sdk' + +import Logger from '@/utils/logger' + +import { toAsyncIter } from '../async' +import { LMStudioDownloadModelError, LMStudioLoadModelError } from '../error' +import { memoFunction } from '../memo' +import { getUserConfig } from '../user-config' + +const logger = Logger.child('lm-studio') + +const cachedGetLMStudioClient = memoFunction((baseUrl: string) => { + const baseUrlObj = new URL(baseUrl) + baseUrlObj.pathname = '' + baseUrlObj.protocol = 'ws' + const lmStudio = new LMStudioClient({ baseUrl: baseUrlObj.origin }) + return lmStudio +}) + +async function getLMStudioClient() { + const userConfig = await getUserConfig() + const baseUrl = userConfig.llm.backends.lmStudio.baseUrl.get() + return cachedGetLMStudioClient(baseUrl) +} + +export async function getLocalModelList() { + try { + const lmStudio = await getLMStudioClient() + const models = await lmStudio.system.listDownloadedModels() + return { models: models.filter((m) => m.type === 'llm') } + } + catch (error) { + logger.error('Error fetching local model list:', error) + return { + models: [], + error: 'Failed to fetch local model list', + } + } +} + +export async function getRunningModelList() { + try { + const lmStudio = await getLMStudioClient() 
+ const models = await lmStudio.llm.listLoaded() + const modelInstanceInfo = await Promise.all(models.map((m) => m.getModelInfo())) + return { models: modelInstanceInfo } + } + catch (error) { + logger.error('Error fetching running model list:', error) + return { + models: [], + error: 'Failed to fetch running model list', + } + } +} + +async function rawLoadModel(modelId: string, config?: LLMLoadModelConfig) { + try { + const lmStudio = await getLMStudioClient() + const models = await lmStudio.llm.listLoaded() + let existing + for (const m of models) { + if (m.modelKey === modelId) { + const info = await m.getModelInfo() + if (info.contextLength === config?.contextLength || !config?.contextLength) { + existing = m + break + } + } + } + const model = existing ?? await lmStudio.llm.load(modelId, { config }) + return { model, client: lmStudio } + } + catch (error) { + logger.error('Error loading model:', error) + throw new LMStudioLoadModelError(String(error)) + } +} + +let loadingPromise: Promise<{ model: LLM, client: LMStudioClient }> | undefined +export async function loadModel(modelId: string, config?: LLMLoadModelConfig) { + if (loadingPromise) return loadingPromise + loadingPromise = rawLoadModel(modelId, config) + return loadingPromise.finally(() => loadingPromise = undefined) +} + +export async function getModelInfo(modelId: string) { + const { models } = await getLocalModelList() + return models.find((m) => m.modelKey === modelId) +} + +export async function unloadModel(identifier: string) { + const lmStudio = await getLMStudioClient() + await lmStudio.llm.unload(identifier) +} + +export async function pullModel(options: { modelName: string, abortSignal?: AbortSignal, onProgress?: (progress: DownloadProgressUpdate) => void }) { + return toAsyncIter(async (yieldData, done) => { + try { + const lmStudio = await getLMStudioClient() + const { modelName, abortSignal } = options + const models = await lmStudio.repository.searchModels({ searchTerm: modelName, 
limit: 20 }) + let modelToDownload: ModelSearchResultDownloadOption | undefined + for (const model of models) { + const downloadOptions = await model.getDownloadOptions() + const recommendedModel = downloadOptions.find((opt) => opt.isRecommended()) + if (recommendedModel) { + modelToDownload = recommendedModel + break + } + } + if (!modelToDownload) { + throw new LMStudioDownloadModelError(`Model "${modelName}" not found in LM Studio model repository for your device, please download it manually in LM Studio.`) + } + await modelToDownload.download({ + signal: abortSignal, + onProgress: (progress) => { + yieldData(progress) + }, + }) + done() + } + catch (error) { + done(error) + } + }) +} + +export async function testConnection(): Promise { + try { + const lmStudio = await getLMStudioClient() + const _ = await lmStudio.system.getLMStudioVersion() + return true + } + catch (error) { + logger.error('Error testing LM Studio connection:', error) + return false + } +} diff --git a/utils/llm/middlewares.test.ts b/utils/llm/middlewares.test.ts new file mode 100644 index 00000000..7ddea057 --- /dev/null +++ b/utils/llm/middlewares.test.ts @@ -0,0 +1,116 @@ +import { LanguageModelV1, LanguageModelV1CallOptions, LanguageModelV1StreamPart } from 'ai' +import { describe, expect, it } from 'vitest' + +import { lmStudioHarmonyEncodingMiddleware } from './middlewares' + +describe('llm middlewares', () => { + const fakeResponse = (input: string) => { + const splitWithSpecialTokens = (str: string) => { + return str.match(/<\|[^|]*\|>|./g) || [] + } + + return { + params: {} as LanguageModelV1CallOptions, + model: {} as LanguageModelV1, + doGenerate(): ReturnType { + return Promise.resolve({ + text: input, + finishReason: 'stop', + usage: { promptTokens: 0, completionTokens: 0 }, + rawCall: { rawPrompt: '', rawSettings: {} }, + rawResponse: {}, + warnings: [], + }) + }, + doStream(): ReturnType { + const chunks = splitWithSpecialTokens(input) + const stream = new ReadableStream({ + 
start(controller) { + for (const chunk of chunks) { + controller.enqueue({ + type: 'text-delta', + textDelta: chunk, + }) + } + controller.enqueue({ + type: 'finish', + finishReason: 'stop', + usage: { promptTokens: 0, completionTokens: 0 }, + }) + controller.close() + }, + }) + + return Promise.resolve({ + stream, + finishReason: 'stop', + rawCall: { rawPrompt: '', rawSettings: {} }, + }) + }, + } + } + + const readStream = async (stream: ReadableStream) => { + const reader = stream.getReader() + let content = '' + let reasoning = '' + const toolCalls: Array<{ name: string, arguments: string }> = [] + while (true) { + const { done, value } = await reader.read() + if (done) break + if (value.type === 'text-delta') { + content += value.textDelta + } + else if (value.type === 'reasoning') { + reasoning += value.textDelta + } + else if (value.type === 'tool-call') { + toolCalls.push({ name: value.toolName, arguments: value.args }) + } + } + return { content, reasoning, toolCalls } + } + + it('should parse lm-studio gpt-oss harmony response', async () => { + const r = await lmStudioHarmonyEncodingMiddleware.wrapStream!(fakeResponse( + `<|channel|>commentary<|constrain|>response<|message|>Here is the final response.`, + )) + const { content } = await readStream(r.stream) + expect(content).toBe('Here is the final response.') + }) + + it('should parse lm-studio gpt-oss harmony function call', async () => { + const r = await lmStudioHarmonyEncodingMiddleware.wrapStream!(fakeResponse( + `Hello<|channel|>commentary to=view_tab<|constrain|>json<|message|>{"tab_id": "123"}`, + )) + const { content, toolCalls } = await readStream(r.stream) + expect(content).toBe('Hello') + expect(toolCalls).toEqual([{ name: 'view_tab', arguments: '{"tab_id": "123"}' }]) + }) + + it('should parse lm-studio gpt-oss harmony function call (2)', async () => { + const r = await lmStudioHarmonyEncodingMiddleware.wrapStream!(fakeResponse( + `Let me click on the first model listed to see what additional 
information and related resources are available.<|channel|>commentary to=click>{"element_id":"14"}`, + )) + const { content, toolCalls } = await readStream(r.stream) + expect(content).toBe('Let me click on the first model listed to see what additional information and related resources are available.') + expect(toolCalls).toEqual([{ name: 'click', arguments: '{"element_id":"14"}' }]) + }) + + it('should parse lm-studio gpt-oss harmony function call (3)', async () => { + const r = await lmStudioHarmonyEncodingMiddleware.wrapStream!(fakeResponse( + `Hello<|channel|>commentary to=tool_calls <|constrain|>json<|message|>{"name":"view_tab","arguments":{"tab_id":"we0sttv9j0"}} world.`, + )) + const { content, toolCalls } = await readStream(r.stream) + expect(content).toBe('Hello world.') + expect(toolCalls).toEqual([{ name: 'tool_calls', arguments: '{"name":"view_tab","arguments":{"tab_id":"we0sttv9j0"}}' }]) + }) + + it('should parse lm-studio gpt-oss harmony json object (1)', async () => { + const r = await lmStudioHarmonyEncodingMiddleware.wrapStream!(fakeResponse( + `<|channel|>commentary<|constrain|>json<|message|>{"some": "json", "with": {"nested": "values"}}`, + )) + const { content } = await readStream(r.stream) + expect(content).toBe('{"some": "json", "with": {"nested": "values"}}') + }) +}) diff --git a/utils/llm/middlewares.ts b/utils/llm/middlewares.ts index 54a70f36..ba7c24fe 100644 --- a/utils/llm/middlewares.ts +++ b/utils/llm/middlewares.ts @@ -98,7 +98,7 @@ function normalizeToolCall(toolCall: // sometimes gpt-oss return tool named xxx.toolName, we should normalize it to toolName return toolName.split('.').pop()! } - if (toolCall.toolName === 'tool_calls' && toolCall.args) { + if ((toolCall.toolName === 'tool_calls' || toolCall.toolName === 'tool_result' || toolCall.toolName === 'tool_results') && toolCall.args) { // 1. 
{name: 'tool_calls', arguments: { name: , arguments: { a: 1 } }} const newToolCall = safeParseJSON({ text: toolCall.args, schema: z.object({ name: z.string(), arguments: z.any() }) }) if (newToolCall.success) { @@ -195,8 +195,9 @@ export const rawLoggingMiddleware: LanguageModelV1Middleware = { log.debug('Stream started', { params }) let text = '' + let reasoning = '' const printLog = debounce(() => { - log.debug('Stream progress', { text }) + log.debug('Stream progress', { text, reasoning }) }, 2000) const transformStream = new TransformStream< @@ -207,6 +208,9 @@ export const rawLoggingMiddleware: LanguageModelV1Middleware = { if (chunk.type === 'text-delta') { text += chunk.textDelta } + else if (chunk.type === 'reasoning') { + reasoning += chunk.textDelta + } printLog() controller.enqueue(chunk) }, @@ -219,9 +223,223 @@ export const rawLoggingMiddleware: LanguageModelV1Middleware = { }, } +const errorResponse = /<\|channel\|>(?!\s*commentary\s+to=[a-z_.]+\s*>)[^<]+>(<\/assistant)?/gs +const extractHarmonyResponse = /<\|channel\|>commentary\s*?<\|constrain\|>\s*(?!json\b)(.*?)<\|message\|>/s +const extractHarmonyFunctionCall = /<\|channel\|>commentary(.+)?to=([a-z_.]+).+?\{/s +const extractHarmonyJSONObject = /<\|channel\|>(.+)?<\|constrain\|>json<\|message\|>\{/s + +function sliceBalancedJson(str: string, startIdx: number) { + let i = startIdx + let depth = 0 + let inString = false + let escape = false + + for (; i < str.length; i++) { + const ch = str[i] + + if (inString) { + if (escape) { + escape = false + } + else if (ch === '\\') { + escape = true + } + else if (ch === '"') { + inString = false + } + continue + } + + // 不在字符串里 + if (ch === '"') { + inString = true + } + else if (ch === '{') { + depth++ + } + else if (ch === '}') { + depth-- + if (depth === 0) { + return str.slice(startIdx, i + 1) + } + } + } + return null +} + +export const lmStudioHarmonyEncodingMiddleware: LanguageModelV1Middleware = { + wrapGenerate: async ({ doGenerate }) => { + const 
log = logger.child('lmStudioHarmonyParserMiddleware') + + const result = await doGenerate() + + const originalText = result.text ?? '' + result.text = originalText?.replace(extractHarmonyResponse, '').replace(errorResponse, '') + + const matchedFunctionCall = result.text?.match(extractHarmonyFunctionCall) + + if (matchedFunctionCall) { + const matchStartIdx = matchedFunctionCall.index! + const jsonStartIdx = matchStartIdx + matchedFunctionCall[0].lastIndexOf('{') + const balancedJson = sliceBalancedJson(originalText, jsonStartIdx) + const jsonEndIdx = balancedJson ? (jsonStartIdx + balancedJson.length) : jsonStartIdx + if (balancedJson) { + result.text = originalText.slice(0, matchStartIdx) + originalText.slice(jsonEndIdx) + const [_, _1, functionName, parameters] = matchedFunctionCall + result.toolCalls = result.toolCalls ?? [] + result.toolCalls.push({ + toolCallId: generateRandomId(), + toolCallType: 'function', + toolName: functionName.trim(), + args: balancedJson.trim(), + }) + log.debug('Harmony function call extracted', { functionName, parameters }) + } + } + else { + const matchedJSONObject = result.text?.match(extractHarmonyJSONObject) + if (matchedJSONObject) { + const matchStartIdx = matchedJSONObject.index! + const jsonStartIdx = matchStartIdx + matchedJSONObject[0].lastIndexOf('{') + const balancedJson = sliceBalancedJson(originalText, jsonStartIdx) + const jsonEndIdx = balancedJson ? 
(jsonStartIdx + balancedJson.length) : jsonStartIdx + const [_raw, _channelName] = matchedJSONObject + result.text = originalText.slice(0, matchStartIdx) + originalText.slice(jsonEndIdx) + log.debug('Harmony json object extracted', { originalText, text: result.text }) + } + } + + return result + }, + wrapStream: async ({ doStream, params }) => { + const log = logger.child('lmStudioHarmonyParserMiddleware') + + const { stream, ...rest } = await doStream() + + log.debug('Stream started', { params }) + + let channelStarted = false + let channelContent = '' + const TOKEN_CHANNEL = '<|channel|>' + + const transformStream = new TransformStream< + LanguageModelV1StreamPart, + LanguageModelV1StreamPart + >({ + transform(chunk, controller) { + if (chunk.type === 'text-delta') { + if (!channelStarted && chunk.textDelta.trim().startsWith(TOKEN_CHANNEL)) { + channelStarted = true + } + if (channelStarted) { + channelContent += chunk.textDelta + const matchedResponse = channelContent.match(extractHarmonyResponse) + if (matchedResponse) { + const restContent = channelContent.replace(extractHarmonyResponse, '') + log.debug('extract response or error from harmony response', { channelContent, restContent }) + if (restContent.trim().startsWith(TOKEN_CHANNEL)) { + channelContent = restContent + channelStarted = true + } + else { + controller.enqueue({ + type: 'text-delta', + textDelta: restContent, + }) + channelStarted = false + channelContent = '' + } + } + if (channelStarted) { + const matchedFunctionCall = channelContent.match(extractHarmonyFunctionCall) + if (matchedFunctionCall) { + const matchStartIdx = matchedFunctionCall.index! + const jsonStartIdx = matchStartIdx + matchedFunctionCall[0].lastIndexOf('{') + const balancedJson = sliceBalancedJson(channelContent, jsonStartIdx) + const jsonEndIdx = balancedJson ? 
(jsonStartIdx + balancedJson.length) : jsonStartIdx + if (balancedJson) { + const [_, _1, functionName] = matchedFunctionCall + controller.enqueue({ + type: 'tool-call', + toolCallId: generateRandomId(), + toolCallType: 'function', + toolName: functionName.trim(), + args: balancedJson.trim(), + }) + const restContent = channelContent.slice(0, matchStartIdx) + channelContent.slice(jsonEndIdx) + if (restContent) { + controller.enqueue({ + type: 'text-delta', + textDelta: restContent, + }) + } + log.debug('extract function call from harmony response', { channelContent, functionName, parameters: balancedJson, restContent }) + channelStarted = false + channelContent = '' + } + } + else { + const matchedJSONObject = channelContent.match(extractHarmonyJSONObject) + if (matchedJSONObject) { + const matchStartIdx = matchedJSONObject.index! + const jsonStartIdx = matchStartIdx + matchedJSONObject[0].lastIndexOf('{') + const balancedJson = sliceBalancedJson(channelContent, jsonStartIdx) + const jsonEndIdx = balancedJson ? 
(jsonStartIdx + balancedJson.length) : jsonStartIdx + const [_raw, _channelName] = matchedJSONObject + if (balancedJson) { + controller.enqueue({ + type: 'text-delta', + textDelta: balancedJson, + }) + const restContent = channelContent.slice(0, matchStartIdx) + channelContent.slice(jsonEndIdx) + if (restContent) { + controller.enqueue({ + type: 'text-delta', + textDelta: restContent, + }) + } + log.debug('extract json object from harmony response', { channelContent, balancedJson, restContent }) + } + } + else { + const matchedError = channelContent.match(errorResponse) + if (matchedError) { + const restContent = channelContent.replace(errorResponse, '') + if (restContent) { + controller.enqueue({ + type: 'text-delta', + textDelta: restContent, + }) + } + log.debug('extract error from harmony response', { channelContent, restContent }) + channelStarted = false + channelContent = '' + } + } + } + } + } + else { + controller.enqueue(chunk) + } + } + else { + controller.enqueue(chunk) + } + }, + }) + + return { + stream: stream.pipeThrough(transformStream), + ...rest, + } + }, +} + export const middlewares = [ + // rawLoggingMiddleware, normalizeToolCallsMiddleware, extractPromptBasedToolCallsMiddleware, + lmStudioHarmonyEncodingMiddleware, reasoningMiddleware, - // rawLoggingMiddleware, ] diff --git a/utils/llm/models.ts b/utils/llm/models.ts index 6644149f..dda2aa9d 100644 --- a/utils/llm/models.ts +++ b/utils/llm/models.ts @@ -4,7 +4,9 @@ import { getUserConfig } from '@/utils/user-config' import { ModelNotFoundError } from '../error' import { makeCustomFetch } from '../fetch' +import { loadModel as loadLMStudioModel } from './lm-studio' import { middlewares } from './middlewares' +import { LMStudioChatLanguageModel } from './providers/lm-studio/chat-language-model' import { createOllama } from './providers/ollama' import { WebLLMChatLanguageModel } from './providers/web-llm/openai-compatible-chat-language-model' import { isToggleableThinkingModel } from 
'./thinking-models' @@ -13,13 +15,14 @@ import { getWebLLMEngine, WebLLMSupportedModel } from './web-llm' export async function getModelUserConfig() { const userConfig = await getUserConfig() const model = userConfig.llm.model.get() - const baseUrl = userConfig.llm.baseUrl.get() + const endpointType = userConfig.llm.endpointType.get() + const baseUrl = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].baseUrl.get() const apiKey = userConfig.llm.apiKey.get() - const numCtx = userConfig.llm.numCtx.get() - const enableNumCtx = userConfig.llm.enableNumCtx.get() + const numCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].numCtx.get() + const enableNumCtx = userConfig.llm.backends[endpointType === 'lm-studio' ? 'lmStudio' : 'ollama'].enableNumCtx.get() const reasoning = userConfig.llm.reasoning.get() if (!model) { - throw new ModelNotFoundError() + throw new ModelNotFoundError(undefined, endpointType) } return { baseUrl, @@ -28,6 +31,7 @@ export async function getModelUserConfig() { numCtx, enableNumCtx, reasoning, + endpointType, } } @@ -41,11 +45,11 @@ export async function getModel(options: { enableNumCtx: boolean reasoning: boolean autoThinking?: boolean + endpointType: LLMEndpointType onLoadingModel?: (prg: ModelLoadingProgressEvent) => void }) { - const userConfig = await getUserConfig() + const endpointType = options.endpointType let model: LanguageModelV1 - const endpointType = userConfig.llm.endpointType.get() if (endpointType === 'ollama') { const customFetch = makeCustomFetch({ bodyTransformer: (body) => { @@ -54,7 +58,7 @@ export async function getModel(options: { if (typeof body !== 'string') return body // add additional check to avoid errors, eg gamma3 does not support think argument - const _isToggleableThinkingModel = isToggleableThinkingModel(options.model) + const _isToggleableThinkingModel = isToggleableThinkingModel('ollama', options.model) const parsedBody = JSON.parse(body) return 
JSON.stringify({ @@ -72,6 +76,10 @@ export async function getModel(options: { structuredOutputs: true, }) } + else if (endpointType === 'lm-studio') { + const lmStudioClientModel = await loadLMStudioModel(options.model, { contextLength: options.enableNumCtx ? options.numCtx : undefined }) + model = new LMStudioChatLanguageModel(lmStudioClientModel.client, lmStudioClientModel.model) + } else if (endpointType === 'web-llm') { const engine = await getWebLLMEngine({ model: options.model as WebLLMSupportedModel, @@ -93,7 +101,7 @@ export async function getModel(options: { } return wrapLanguageModel({ model, - middleware: middlewares, + middleware: middlewares.slice(), }) } diff --git a/utils/llm/ollama.ts b/utils/llm/ollama.ts index 0e8df82f..89f61de9 100644 --- a/utils/llm/ollama.ts +++ b/utils/llm/ollama.ts @@ -7,7 +7,7 @@ import { getUserConfig } from '../user-config' async function getOllamaClient() { const userConfig = await getUserConfig() - const baseUrl = userConfig.llm.baseUrl.get() + const baseUrl = userConfig.llm.backends.ollama.baseUrl.get() const origin = new URL(baseUrl).origin const ollama = new Ollama({ host: origin }) return ollama @@ -109,3 +109,20 @@ export async function showModelDetails(modelId: string) { capabilities?: ModelCapability[] } } + +export async function testConnection() { + const userConfig = await getUserConfig() + try { + const baseUrl = userConfig.llm.backends.ollama.baseUrl.get() + const origin = new URL(baseUrl).origin + const response = await fetch(origin) + if (!response.ok) return false + const text = await response.text() + if (text.includes('Ollama is running')) return true + else return false + } + catch (error: unknown) { + logger.error('error connecting to ollama api', error) + return false + } +} diff --git a/utils/llm/predefined-models.ts b/utils/llm/predefined-models.ts index 5a123d29..6e7d54df 100644 --- a/utils/llm/predefined-models.ts +++ b/utils/llm/predefined-models.ts @@ -1,7 +1,7 @@ export type 
PredefinedOllamaModel = { name: string id: string - size: number + size?: number description?: string url?: string tags?: string[] @@ -71,3 +71,50 @@ export const PREDEFINED_OLLAMA_MODELS: PredefinedOllamaModel[] = [ size: 12.83 * GB, }, ] + +export const PREDEFINED_LM_STUDIO_MODELS: PredefinedOllamaModel[] = [ + { + name: 'DeepSeek-R1 8B', + id: 'lmstudio-community/DeepSeek-R1-0528-Qwen3-8B', + }, + { + name: 'DeepSeek-R1 14B', + id: 'lmstudio-community/DeepSeek-R1-Distill-Qwen-14B', + }, + { + name: 'Qwen3 4B', + id: 'lmstudio-community/Qwen3-4B', + }, + { + name: 'Qwen3 8B', + id: 'lmstudio-community/Qwen3-8B', + }, + { + name: 'Qwen3 14B', + id: 'lmstudio-community/Qwen3-14B', + }, + { + name: 'Llama3.2 3B', + id: 'lmstudio-community/Llama-3.2-3B', + }, + { + name: 'Gemma3 4B', + id: 'lmstudio-community/Gemma-3-4B', + }, + { + name: 'Gemma3 12B', + id: 'lmstudio-community/Gemma-3-12B', + }, + { + name: 'Phi4-mini', + id: 'lmstudio-community/Phi-4-mini-reasoning', + }, + { + name: 'Phi4', + id: 'lmstudio-community/Phi-4-reasoning', + }, + { + name: 'gpt-oss 20B', + id: 'lmstudio-community/gpt-oss-20b', + }, +] diff --git a/utils/llm/providers/lm-studio/chat-language-model.ts b/utils/llm/providers/lm-studio/chat-language-model.ts new file mode 100644 index 00000000..51523ef4 --- /dev/null +++ b/utils/llm/providers/lm-studio/chat-language-model.ts @@ -0,0 +1,196 @@ +import { + LanguageModelV1, + LanguageModelV1CallWarning, + LanguageModelV1ObjectGenerationMode, + LanguageModelV1StreamPart, +} from '@ai-sdk/provider' +import { LLM, LLMPredictionFragment, LMStudioClient, PredictionResult } from '@lmstudio/sdk' + +import { logger } from '@/utils/logger' + +import { convertToLMStudioMessages } from './convert-to-lm-studio-messages' +import { mapStopReason } from './map-stop-reason' + +const log = logger.child('Agent').child('lm-studio-model-provider') + +export type OpenAICompatibleChatConfig = { + provider: string + includeUsage?: boolean + + /** +Default object 
generation mode that should be used with this model when +no mode is specified. Should be the mode with the best results for this +model. `undefined` can be specified if object generation is not supported. + */ + defaultObjectGenerationMode?: LanguageModelV1ObjectGenerationMode + + /** + * Whether the model supports structured outputs. + */ + supportsStructuredOutputs?: boolean +} + +export class LMStudioChatLanguageModel implements LanguageModelV1 { + readonly specificationVersion = 'v1' + + private readonly model: LLM + readonly modelId: string + readonly provider = 'lm-studio' + readonly client: LMStudioClient + + constructor(client: LMStudioClient, model: LLM) { + this.model = model + this.modelId = model.modelKey + this.client = client + } + + get defaultObjectGenerationMode(): 'json' | 'tool' | undefined { + return 'json' + } + + private async getArgs({ + prompt, + maxTokens, + temperature, + topP, + topK, + frequencyPenalty, + presencePenalty, + stopSequences, + responseFormat, + seed, + abortSignal, + }: Parameters[0]) { + const warnings: LanguageModelV1CallWarning[] = [] + + const baseArgs = { + signal: abortSignal, + // standardized settings: + maxTokens, + temperature, + topP, + topK, + frequencyPenalty, + presencePenalty, + response_format: + responseFormat?.type === 'json' + ? responseFormat.schema != null + ? 
{ + type: 'json' as const, + schema: JSON.stringify(responseFormat.schema), + } + : undefined + : undefined, + + stop: stopSequences, + seed, + // messages: + messages: await convertToLMStudioMessages(this.client, prompt), + } + + return { + args: { ...baseArgs }, + warnings, + } + } + + async doGenerate(options: Parameters[0]): Promise>> { + const { args, warnings } = await this.getArgs({ ...options }) + + const body = args + + log.debug('doGenerate called', { args }) + + const responseBody = await this.model.respond(args.messages, { + signal: body.signal, + maxTokens: body.maxTokens, + topPSampling: body.topP, + topKSampling: body.topK, + temperature: body.temperature, + stopStrings: body.stop, + }) + + const { messages: rawPrompt, ...rawSettings } = args + + return { + text: responseBody.nonReasoningContent, + reasoning: responseBody.reasoningContent, + finishReason: mapStopReason(responseBody.stats.stopReason), + usage: { + promptTokens: responseBody.stats.promptTokensCount ?? Number.NaN, + completionTokens: responseBody.stats.predictedTokensCount ?? 
Number.NaN, + }, + rawCall: { rawPrompt, rawSettings }, + rawResponse: {}, + warnings, + } + } + + async doStream(options: Parameters[0]): Promise>> { + const { args, warnings } = await this.getArgs({ ...options }) + + const body = args + log.debug('doStream called', { args }) + const resp = this.model.respond(args.messages, { + signal: body.signal, + maxTokens: body.maxTokens, + topPSampling: body.topP, + topKSampling: body.topK, + temperature: body.temperature, + stopStrings: body.stop, + }) + + const { messages: rawPrompt, ...rawSettings } = args + + const readable = new ReadableStream<{ type: 'chunk', data: LLMPredictionFragment } | { type: 'result', data: PredictionResult }>({ + async start(controller) { + for await (const chunk of resp) { + controller.enqueue({ type: 'chunk', data: chunk }) + } + const result = await resp + log.debug('stream finished', { result }) + controller.enqueue({ type: 'result', data: result }) + controller.close() + }, + }) + + return { + stream: readable.pipeThrough( + new TransformStream<{ type: 'chunk', data: LLMPredictionFragment } | { type: 'result', data: PredictionResult }, LanguageModelV1StreamPart>({ + transform(chunk, controller) { + if (chunk.type === 'result') { + controller.enqueue({ + type: 'finish', + finishReason: mapStopReason(chunk.data.stats.stopReason), + usage: { + promptTokens: chunk.data.stats.promptTokensCount ?? NaN, + completionTokens: chunk.data.stats.predictedTokensCount ?? 
NaN, + }, + }) + } + else { + const delta = chunk.data + + if (delta.reasoningType === 'reasoning') { + controller.enqueue({ + type: 'reasoning', + textDelta: delta.content, + }) + } + else if (delta.reasoningType === 'none' && !delta.isStructural) { + controller.enqueue({ + type: 'text-delta', + textDelta: delta.content, + }) + } + } + }, + }), + ), + rawCall: { rawPrompt, rawSettings }, + rawResponse: {}, + warnings, + request: { body: JSON.stringify(body) }, + } + } +} diff --git a/utils/llm/providers/lm-studio/chat-settings.ts b/utils/llm/providers/lm-studio/chat-settings.ts new file mode 100644 index 00000000..9ade4f24 --- /dev/null +++ b/utils/llm/providers/lm-studio/chat-settings.ts @@ -0,0 +1,18 @@ +export type OpenAICompatibleChatModelId = string + +export interface OpenAICompatibleChatSettings { + /** +A unique identifier representing your end-user, which can help the provider to +monitor and detect abuse. + */ + user?: string + + /** +Simulates streaming by using a normal generate call and returning it as a stream. +Enable this if the model that you are using does not support streaming. + +Defaults to `false`. +@deprecated Use `simulateStreamingMiddleware` instead. 
+ */ + simulateStreaming?: boolean +} diff --git a/utils/llm/providers/lm-studio/convert-to-lm-studio-messages.ts b/utils/llm/providers/lm-studio/convert-to-lm-studio-messages.ts new file mode 100644 index 00000000..841a4d4d --- /dev/null +++ b/utils/llm/providers/lm-studio/convert-to-lm-studio-messages.ts @@ -0,0 +1,59 @@ +import { LanguageModelV1Prompt, UnsupportedFunctionalityError } from '@ai-sdk/provider' +import { convertUint8ArrayToBase64 } from '@ai-sdk/provider-utils' +import { ChatMessageInput, LMStudioClient } from '@lmstudio/sdk' + +export async function convertToLMStudioMessages(client: LMStudioClient, prompt: LanguageModelV1Prompt): Promise { + const messages: ChatMessageInput[] = [] + for (const { role, content } of prompt) { + switch (role) { + case 'system': { + messages.push({ role: 'system', content }) + break + } + + case 'user': { + if (content.length === 1 && content[0].type === 'text') { + messages.push({ role: 'user', content: content[0].text }) + break + } + + const images = await Promise.all(content.filter((part) => part.type === 'image').map(async (part) => { + if (part.image instanceof URL) { + throw new UnsupportedFunctionalityError({ + functionality: 'Image URLs in user messages', + }) + } + return await client.files.prepareImageBase64('image.png', convertUint8ArrayToBase64(part.image)) + })) + + messages.push({ + role: 'user', + content: content.filter((part) => part.type === 'text').map((part) => part.text).join(''), + images: images.length > 0 ? 
images : undefined, + }) + + break + } + + case 'assistant': { + messages.push({ + role: 'assistant', + content: content.filter((part) => part.type === 'text').map((part) => part.text).join(''), + }) + + break + } + + case 'tool': { + break + } + + default: { + const _exhaustiveCheck: never = role + throw new Error(`Unsupported role: ${_exhaustiveCheck}`) + } + } + } + + return messages +} diff --git a/utils/llm/providers/lm-studio/map-stop-reason.ts b/utils/llm/providers/lm-studio/map-stop-reason.ts new file mode 100644 index 00000000..6282c093 --- /dev/null +++ b/utils/llm/providers/lm-studio/map-stop-reason.ts @@ -0,0 +1,17 @@ +import { LanguageModelV1FinishReason } from '@ai-sdk/provider' +import { LLMPredictionStopReason } from '@lmstudio/sdk' + +const Mapping: Record = { + contextLengthReached: 'length', + eosFound: 'stop', + failed: 'error', + maxPredictedTokensReached: 'length', + modelUnloaded: 'error', + stopStringFound: 'stop', + userStopped: 'stop', + toolCalls: 'tool-calls', +} + +export function mapStopReason(stopReason: LLMPredictionStopReason): LanguageModelV1FinishReason { + return Mapping[stopReason] +} diff --git a/utils/llm/providers/lm-studio/openai-compatible-prepare-tools.ts b/utils/llm/providers/lm-studio/openai-compatible-prepare-tools.ts new file mode 100644 index 00000000..8b586d0b --- /dev/null +++ b/utils/llm/providers/lm-studio/openai-compatible-prepare-tools.ts @@ -0,0 +1,88 @@ +import { LanguageModelV1, LanguageModelV1CallWarning, UnsupportedFunctionalityError } from '@ai-sdk/provider' + +export function prepareTools({ + mode, +}: { + mode: Parameters[0]['mode'] & { + type: 'regular' + } + structuredOutputs: boolean +}): { + tools: + | undefined + | Array<{ + type: 'function' + function: { + name: string + description: string | undefined + parameters: unknown + } + }> + tool_choice: { type: 'function', function: { name: string } } | 'auto' | 'none' | 'required' | undefined + toolWarnings: LanguageModelV1CallWarning[] + } { + // when 
the tools array is empty, change it to undefined to prevent errors: + const tools = mode.tools?.length ? mode.tools : undefined + const toolWarnings: LanguageModelV1CallWarning[] = [] + + if (tools == null) { + return { tools: undefined, tool_choice: undefined, toolWarnings } + } + + const toolChoice = mode.toolChoice + + const openaiCompatTools: Array<{ + type: 'function' + function: { + name: string + description: string | undefined + parameters: unknown + } + }> = [] + + for (const tool of tools) { + if (tool.type === 'provider-defined') { + toolWarnings.push({ type: 'unsupported-tool', tool }) + } + else { + openaiCompatTools.push({ + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: tool.parameters, + }, + }) + } + } + + if (toolChoice == null) { + return { tools: openaiCompatTools, tool_choice: undefined, toolWarnings } + } + + const type = toolChoice.type + + switch (type) { + case 'auto': + case 'none': + case 'required': + return { tools: openaiCompatTools, tool_choice: type, toolWarnings } + case 'tool': + return { + tools: openaiCompatTools, + tool_choice: { + type: 'function', + function: { + name: toolChoice.toolName, + }, + }, + toolWarnings, + } + default: { + const _exhaustiveCheck: never = type + throw new UnsupportedFunctionalityError({ + functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`, + }) + } + } +} diff --git a/utils/llm/thinking-models.ts b/utils/llm/thinking-models.ts index 7356e8a2..898ef8b9 100644 --- a/utils/llm/thinking-models.ts +++ b/utils/llm/thinking-models.ts @@ -1,6 +1,9 @@ +import { LLMEndpointType } from './models' + export const THINKING_OLLAMA_MODELS: string[] = ['deepseek-r1', 'qwen3', 'magistral'] -export const isToggleableThinkingModel = (model: string): boolean => { +export const isToggleableThinkingModel = (endpointType: LLMEndpointType, model: string): boolean => { + if (endpointType !== 'ollama') return false // check if model is contains thinking model 
name return THINKING_OLLAMA_MODELS.some((thinkingModel) => model.includes(thinkingModel)) } diff --git a/utils/pinia-store/store.ts b/utils/pinia-store/store.ts index f34af990..523dd69c 100644 --- a/utils/pinia-store/store.ts +++ b/utils/pinia-store/store.ts @@ -1,6 +1,7 @@ import { defineStore } from 'pinia' -import { ref } from 'vue' +import { computed, ref } from 'vue' +import { LMStudioModelInfo } from '@/types/lm-studio-models' import { OllamaModelInfo } from '@/types/ollama-models' import { logger } from '@/utils/logger' import { c2bRpc, s2bRpc, settings2bRpc } from '@/utils/rpc' @@ -17,13 +18,14 @@ const rpc = forRuntimes({ default: () => { throw new Error('Unsupported runtime') }, }) -export const useOllamaStatusStore = defineStore('ollama-status', () => { - const modelList = ref([]) - const connectionStatus = ref<'connected' | 'error' | 'unconnected'>('unconnected') - const updateModelList = async (): Promise => { +export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => { + // Ollama model list and connection status + const ollamaModelList = ref([]) + const ollamaConnectionStatus = ref<'connected' | 'error' | 'unconnected'>('unconnected') + const updateOllamaModelList = async (): Promise => { try { - const response = await rpc.getLocalModelList() - connectionStatus.value = 'connected' + const response = await rpc.getOllamaLocalModelList() + ollamaConnectionStatus.value = 'connected' log.debug('Model list fetched:', response) // Check thinking support for each model @@ -34,31 +36,76 @@ export const useOllamaStatusStore = defineStore('ollama-status', () => { })), ) - modelList.value = modelsWithThinkingSupport - return modelList.value + ollamaModelList.value = modelsWithThinkingSupport + return ollamaModelList.value } catch (error) { log.error('Failed to fetch model list:', error) - connectionStatus.value = 'error' + ollamaConnectionStatus.value = 'error' return [] } } - const clearModelList = () => { - modelList.value = [] + const 
clearOllamaModelList = () => { + ollamaModelList.value = [] + } + const deleteOllamaModel = async (model: string) => { + await rpc.deleteOllamaModel(model) + await updateOllamaModelList() } - const connectionStatusLoading = ref(false) - const updateConnectionStatus = async () => { - connectionStatusLoading.value = true + const ollamaConnectionStatusLoading = ref(false) + const updateOllamaConnectionStatus = async () => { + ollamaConnectionStatusLoading.value = true const success = await rpc.testOllamaConnection().catch(() => false) - connectionStatus.value = success ? 'connected' : 'error' - connectionStatusLoading.value = false + ollamaConnectionStatus.value = success ? 'connected' : 'error' + ollamaConnectionStatusLoading.value = false return success } - const unloadModel = async (model: string) => { + const unloadOllamaModel = async (model: string) => { await rpc.unloadOllamaModel(model) - await updateModelList() + await updateOllamaModelList() + } + + // LMStudio model list and connection status + const lmStudioModelList = ref([]) + const updateLMStudioModelList = async (): Promise => { + try { + const response = await rpc.getLMStudioModelList() + const runningModels = await rpc.getLMStudioRunningModelList().catch(() => ({ models: [] })) + log.debug('LMStudio Model list fetched:', response, runningModels) + lmStudioModelList.value = response.models.map((model) => { + const instances = runningModels.models.filter((m) => m.modelKey === model.modelKey) + return { + ...model, + instances, + } + }) + return lmStudioModelList.value + } + catch (error) { + log.error('Failed to fetch LMStudio model list:', error) + return [] + } + } + + const unloadLMStudioModel = async (identifier: string) => { + await rpc.unloadLMStudioModel(identifier) + await updateLMStudioModelList() + } + + const clearLMStudioModelList = () => { + lmStudioModelList.value = [] + } + + const lmStudioConnectionStatus = ref<'unconnected' | 'connected'>('unconnected') + const 
lmStudioConnectionStatusLoading = ref(false) + const updateLMStudioConnectionStatus = async () => { + lmStudioConnectionStatusLoading.value = true + const success = await rpc.testLMStudioConnection().catch(() => false) + lmStudioConnectionStatus.value = success ? 'connected' : 'unconnected' + lmStudioConnectionStatusLoading.value = false + return success } const checkCurrentModelSupportVision = async () => { @@ -84,27 +131,86 @@ export const useOllamaStatusStore = defineStore('ollama-status', () => { } } - const initDefaultModel = async () => { + const modelList = computed(() => { + return [ + ...ollamaModelList.value.map((m) => ({ + backend: 'ollama' as const, + model: m.model, + name: m.name, + })), + ...lmStudioModelList.value.map((m) => ({ + backend: 'lm-studio' as const, + model: m.modelKey, + name: m.displayName ?? m.modelKey, + })), + ] + }) + + // this function has side effects: it may change the common model in user config + const checkCurrentBackendStatus = async () => { const userConfig = await getUserConfig() const endpointType = userConfig.llm.endpointType.get() const commonModelConfig = userConfig.llm.model - const modelList = await updateModelList() - if (endpointType === 'ollama' && !modelList.some((model) => model.model === commonModelConfig.get())) { - commonModelConfig.set(modelList[0]?.model) + let status: 'no-model' | 'ok' | 'backend-unavailable' = 'ok' + if (endpointType === 'ollama') { + const backendStatus = await updateOllamaConnectionStatus() + if (backendStatus) { + const ollamaModelList = await updateOllamaModelList() + if (!ollamaModelList.some((model) => model.model === commonModelConfig.get())) { + if (ollamaModelList.length) { + commonModelConfig.set(ollamaModelList[0]?.model) + status = 'ok' + } + else { status = 'no-model' } + } + } + else { status = 'backend-unavailable' } + } + else if (endpointType === 'lm-studio') { + const backendStatus = await updateLMStudioConnectionStatus() + if (backendStatus) { + const lmStudioModelList = 
await updateLMStudioModelList() + if (!lmStudioModelList.some((model) => model.modelKey === commonModelConfig.get())) { + if (lmStudioModelList.length) { + commonModelConfig.set(lmStudioModelList[0]?.modelKey) + status = 'ok' + } + else { status = 'no-model' } + } + } + else { status = 'backend-unavailable' } } - return { modelList, commonModel: commonModelConfig.get() } + return { modelList, commonModel: commonModelConfig.get(), status, endpointType } + } + + const updateModelList = async () => { + await Promise.allSettled([updateOllamaModelList(), updateLMStudioModelList()]) + return modelList.value } return { - connectionStatusLoading, - connectionStatus, - modelList, - initDefaultModel, - unloadModel, - updateModelList, - clearModelList, - updateConnectionStatus, + // Ollama + ollamaConnectionStatusLoading, + ollamaConnectionStatus, + ollamaModelList, + unloadOllamaModel, + updateOllamaModelList, + clearOllamaModelList, + updateOllamaConnectionStatus, + // LMStudio + lmStudioConnectionStatusLoading, + lmStudioConnectionStatus, + lmStudioModelList, + unloadLMStudioModel, + updateLMStudioModelList, + deleteOllamaModel, + clearLMStudioModelList, + updateLMStudioConnectionStatus, + // Common checkCurrentModelSupportVision, checkModelSupportThinking, + checkCurrentBackendStatus, + updateModelList, + modelList, } }) diff --git a/utils/rpc/background-fns.ts b/utils/rpc/background-fns.ts index 18eff385..535c6166 100644 --- a/utils/rpc/background-fns.ts +++ b/utils/rpc/background-fns.ts @@ -13,10 +13,11 @@ import { BackgroundCacheServiceManager } from '../../entrypoints/background/serv import { BackgroundChatHistoryServiceManager } from '../../entrypoints/background/services/chat-history-service' import { MODELS_NOT_SUPPORTED_FOR_STRUCTURED_OUTPUT } from '../constants' import { ContextMenuManager } from '../context-menu' -import { AiSDKError, AppError, CreateTabStreamCaptureError, FetchError, GenerateObjectSchemaError, ModelRequestError, UnknownError } from '../error' 
+import { AiSDKError, AppError, CreateTabStreamCaptureError, FetchError, fromError, GenerateObjectSchemaError, ModelRequestError, UnknownError } from '../error' import { parsePartialJson } from '../json/parser/parse-partial-json' -import { getModel, getModelUserConfig, ModelLoadingProgressEvent } from '../llm/models' -import { deleteModel, getLocalModelList, getRunningModelList, pullModel, showModelDetails, unloadModel } from '../llm/ollama' +import * as lmStudioUtils from '../llm/lm-studio' +import { getModel, getModelUserConfig, LLMEndpointType, ModelLoadingProgressEvent } from '../llm/models' +import * as ollamaUtils from '../llm/ollama' import { SchemaName, Schemas, selectSchema } from '../llm/output-schema' import { PromptBasedTool } from '../llm/tools/prompt-based/helpers' import { getWebLLMEngine, WebLLMSupportedModel } from '../llm/web-llm' @@ -87,21 +88,21 @@ const makeLoadingModelListener = (port: Browser.runtime.Port) => (ev: ModelLoadi }) } -const normalizeError = (_error: unknown) => { +const normalizeError = (_error: unknown, endpointType?: LLMEndpointType) => { const networkErrorMessages = ['NetworkError', 'Failed to fetch'] let error if (_error instanceof AppError) { error = _error } else if (_error instanceof Error && networkErrorMessages.some((msg) => _error.message.includes(msg))) { - error = new ModelRequestError(_error.message) + error = new ModelRequestError(_error.message, endpointType) } else if (AISDKError.isInstance(_error)) { error = new AiSDKError(_error.message) error.name = _error.name } else { - error = new UnknownError(`Unexpected error occurred during request: ${_error}`) + error = new UnknownError(String(_error)) } return error } @@ -120,12 +121,14 @@ const streamText = async (options: Pick { } const deleteOllamaModel = async (modelId: string) => { - await deleteModel(modelId) + await ollamaUtils.deleteModel(modelId) } const unloadOllamaModel = async (modelId: string) => { - await unloadModel(modelId) + await 
ollamaUtils.unloadModel(modelId) const start = Date.now() while (Date.now() - start < 10000) { - const modelList = await getRunningModelList() + const modelList = await ollamaUtils.getRunningModelList() if (!modelList.models.some((m) => m.model === modelId)) { break } @@ -485,8 +488,56 @@ const unloadOllamaModel = async (modelId: string) => { } } +const unloadLMStudioModel = async (identifier: string) => { + await lmStudioUtils.unloadModel(identifier) + const start = Date.now() + while (Date.now() - start < 10000) { + const modelList = await lmStudioUtils.getRunningModelList() + if (!modelList.models.some((m) => m.identifier === identifier)) { + break + } + await sleep(1000) + } +} + +const pullLMStudioModel = async (modelName: string) => { + const portName = `pullLMStudioModel-${Date.now().toString(32)}` + const abortController = new AbortController() + preparePortConnection(portName).then(async (port) => { + if (port.name !== portName) { + return + } + port.onDisconnect.addListener(() => { + abortController.abort() + logger.debug('port disconnected from client') + }) + logger.debug('Starting to pull LMStudio model', modelName) + const response = await lmStudioUtils.pullModel({ modelName, abortSignal: abortController.signal }) + try { + for await (const chunk of response) { + port.postMessage(chunk) + } + } + catch (_error: unknown) { + const error = fromError(_error) + logger.debug('[pullLMStudioModel] error', error) + if (error instanceof Error) { + port.postMessage({ error: error.message }) + } + else if (error instanceof AppError) { + port.postMessage({ error: error.message }) + } + else { + port.postMessage({ error: 'Unknown error' }) + } + } + port.disconnect() + }) + return { portName } +} + const showOllamaModelDetails = async (modelId: string) => { - return showModelDetails(modelId) + return ollamaUtils.showModelDetails(modelId) } const pullOllamaModel = async (modelId: string) => { @@ -501,7 +552,7 @@ const pullOllamaModel = async (modelId: string) => { 
logger.debug('port disconnected from client') abortController.abort() }) - const response = await pullModel(modelId) + const response = await ollamaUtils.pullModel(modelId) abortController.signal.addEventListener('abort', () => { response.abort() }) @@ -532,23 +583,6 @@ const pullOllamaModel = async (modelId: string) => { return { portName } } -async function testOllamaConnection() { - const userConfig = await getUserConfig() - try { - const baseUrl = userConfig.llm.baseUrl.get() - const origin = new URL(baseUrl).origin - const response = await fetch(origin) - if (!response.ok) return false - const text = await response.text() - if (text.includes('Ollama is running')) return true - else return false - } - catch (error: unknown) { - logger.error('error connecting to ollama api', error) - return false - } -} - function initWebLLMEngine(model: WebLLMSupportedModel) { try { const portName = `web-llm-${model}-${Date.now().toString(32)}` @@ -1020,12 +1054,18 @@ export const backgroundFunctions = { generateTextAsync, streamText, getAllTabs, - getLocalModelList, - getRunningModelList, + getOllamaLocalModelList: ollamaUtils.getLocalModelList, + getOllamaRunningModelList: ollamaUtils.getRunningModelList, + testOllamaConnection: ollamaUtils.testConnection, + unloadOllamaModel, + pullLMStudioModel, + getLMStudioModelList: lmStudioUtils.getLocalModelList, + getLMStudioRunningModelList: lmStudioUtils.getRunningModelList, + testLMStudioConnection: lmStudioUtils.testConnection, + unloadLMStudioModel, deleteOllamaModel, pullOllamaModel, showOllamaModelDetails, - unloadOllamaModel, openAndFetchUrlsContent, searchWebsites, generateObjectFromSchema, @@ -1047,7 +1087,6 @@ export const backgroundFunctions = { initCurrentModel, checkSupportWebLLM, getSystemMemoryInfo, - testOllamaConnection, captureVisibleTab, // Translation cache functions cacheGetEntry, @@ -1074,4 +1113,4 @@ export const backgroundFunctions = { showSettings: showSettingsForBackground, updateSidepanelModelList, } 
-;(self as unknown as { backgroundFunctions: unknown }).backgroundFunctions = backgroundFunctions + ; (self as unknown as { backgroundFunctions: unknown }).backgroundFunctions = backgroundFunctions diff --git a/utils/rpc/content-main-world-fns.ts b/utils/rpc/content-main-world-fns.ts index b68c9648..99907557 100644 --- a/utils/rpc/content-main-world-fns.ts +++ b/utils/rpc/content-main-world-fns.ts @@ -78,7 +78,7 @@ export async function checkBackendModelReady(model?: string): Promise<{ backend: const userConfig = await getUserConfig() try { if (userConfig.llm.endpointType.get() === 'ollama') { - const modelList = await c2bRpc.getLocalModelList() + const modelList = await c2bRpc.getOllamaLocalModelList() if (model === undefined) { return { backend: true, model: modelList.models.length > 0 } } @@ -86,6 +86,15 @@ export async function checkBackendModelReady(model?: string): Promise<{ backend: return { backend: true, model: modelList.models.some((m) => m.model === model) } } } + else if (userConfig.llm.endpointType.get() === 'lm-studio') { + const modelList = await c2bRpc.getLMStudioModelList() + if (model === undefined) { + return { backend: true, model: modelList.models.length > 0 } + } + else { + return { backend: true, model: modelList.models.some((m) => m.modelKey === model) } + } + } else if (userConfig.llm.endpointType.get() === 'web-llm') { return { backend: true, model: await c2bRpc.hasWebLLMModelInCache('Qwen3-0.6B-q4f16_1-MLC') } } diff --git a/utils/scroll-targets.ts b/utils/scroll-targets.ts index 87dc2ee7..5e2a971d 100644 --- a/utils/scroll-targets.ts +++ b/utils/scroll-targets.ts @@ -3,5 +3,6 @@ import { SettingsScrollTarget } from '@/types/scroll-targets' export const settingsScrollTargets: SettingsScrollTarget[] = [ 'quick-actions-block', 'model-download-section', - 'server-address-section', + 'ollama-server-address-section', + 'lm-studio-server-address-section', ] diff --git a/utils/user-config/helpers.test.ts b/utils/user-config/helpers.test.ts index 
bbd4e2b8..6628186b 100644 --- a/utils/user-config/helpers.test.ts +++ b/utils/user-config/helpers.test.ts @@ -40,7 +40,7 @@ describe('user config', () => { }) const { _getUserConfig: getUserConfig } = await import('./index') const userConfig = await getUserConfig() - expect(userConfig.llm.enableNumCtx.get()).toBe(false) + expect(userConfig.llm.backends.ollama.enableNumCtx.get()).toBe(false) }) it('enableNumCtx should be true when system memory is greater than 8GB', async () => { @@ -53,7 +53,7 @@ describe('user config', () => { }) const { _getUserConfig: getUserConfig } = await import('./index') const userConfig = await getUserConfig() - expect(userConfig.llm.enableNumCtx.get()).toBe(true) + expect(userConfig.llm.backends.ollama.enableNumCtx.get()).toBe(true) }) it('should migrate from old value', async () => { diff --git a/utils/user-config/index.ts b/utils/user-config/index.ts index ae27fa72..48eaf30e 100644 --- a/utils/user-config/index.ts +++ b/utils/user-config/index.ts @@ -75,13 +75,22 @@ export async function _getUserConfig() { llm: { defaultFirstTokenTimeout: await new Config('llm.firstTokenTimeout').default(60 * 1000).build(), // 60 seconds endpointType: await new Config('llm.endpointType').default('ollama' as LLMEndpointType).build(), - baseUrl: await new Config('llm.baseUrl').default('http://localhost:11434/api').build(), model: await new Config('llm.model').build(), apiKey: await new Config('llm.apiKey').default('ollama').build(), - numCtx: await new Config('llm.numCtx').default(1024 * 8).build(), - enableNumCtx: await new Config('llm.enableNumCtx').default(enableNumCtx).build(), reasoning: await new Config('llm.reasoning').default(true).build(), titleGenerationSystemPrompt: await new Config('llm.titleGenerationSystemPrompt').default(DEFAULT_CHAT_TITLE_GENERATION_SYSTEM_PROMPT).build(), + backends: { + ollama: { + numCtx: await new Config('llm.backends.ollama.numCtx').default(1024 * 8).build(), + enableNumCtx: await new 
Config('llm.backends.ollama.enableNumCtx').default(enableNumCtx).build(), + baseUrl: await new Config('llm.backends.ollama.baseUrl').default('http://localhost:11434/api').migrateFrom('llm.baseUrl', (v) => v).build(), + }, + lmStudio: { + numCtx: await new Config('llm.backends.lmStudio.numCtx').default(1024 * 8).build(), + enableNumCtx: await new Config('llm.backends.lmStudio.enableNumCtx').default(enableNumCtx).build(), + baseUrl: await new Config('llm.backends.lmStudio.baseUrl').default('http://localhost:1234/api').build(), + }, + }, }, browserAI: { polyfill: { @@ -123,6 +132,7 @@ export async function _getUserConfig() { thinkingVisibility: await new Config('chat.thinkingVisibility').default('preview' as 'hide' | 'preview' | 'full').build(), }, translation: { + endpointType: await new Config('translation.endpointType').default('ollama' as LLMEndpointType).build(), model: await new Config('translation.model').build(), targetLocale: await new Config('translation.targetLocale').default('zh' as LanguageCode).build(), systemPrompt: await new Config('translation.systemPrompt').default(DEFAULT_TRANSLATOR_SYSTEM_PROMPT).build(), @@ -160,6 +170,16 @@ export async function _getUserConfig() { systemPrompt: await new Config('writingTools.sparkle.systemPrompt').default(DEFAULT_WRITING_TOOLS_SPARKLE_SYSTEM_PROMPT).build(), }, }, + settings: { + blocks: { + ollamaConfig: { + open: await new Config('settings.blocks.ollamaConfig.open').default(true).build(), + }, + lmStudioConfig: { + open: await new Config('settings.blocks.lmStudioConfig.open').default(true).build(), + }, + }, + }, } } diff --git a/utils/web-request.ts b/utils/web-request.ts index 67bf065b..359dd672 100644 --- a/utils/web-request.ts +++ b/utils/web-request.ts @@ -4,28 +4,28 @@ import logger from '@/utils/logger' const log = logger.child('web-request') // a constants for removing old rules -const RULE_ID_REMOVE_ORIGIN = 1 +const RULE_ID_REMOVE_ORIGIN_FOR_OLLAMA_REQUEST = 1 const RULE_ID_REMOVE_DISPOSITION = 2 
export function registerDeclarativeNetRequestRule() { // firefox has some bugs with declarativeNetRequest API, we use rules.json instead if (import.meta.env.FIREFOX) return - const URL_FILTER = /https?:\/\/[^/]*:11434\/.*/ + const OLLAMA_URL_FILTER = /https?:\/\/[^/]*:11434\/.*/ const { resolve, promise } = Promise.withResolvers() const timeout = setTimeout(() => { log.warn('Origin-rewrite rule timeout') resolve() - }, 1000) + }, 2000) browser.runtime.onInstalled.addListener(async () => { // reset the rules when the extension is installed or updated log.debug('Registering origin-rewrite rule', browser.runtime.id) await browser.declarativeNetRequest.updateDynamicRules({ - removeRuleIds: [RULE_ID_REMOVE_ORIGIN], + removeRuleIds: [RULE_ID_REMOVE_ORIGIN_FOR_OLLAMA_REQUEST], addRules: [ { - id: RULE_ID_REMOVE_ORIGIN, + id: RULE_ID_REMOVE_ORIGIN_FOR_OLLAMA_REQUEST, priority: 1, action: { type: browser.declarativeNetRequest.RuleActionType.MODIFY_HEADERS, @@ -37,7 +37,7 @@ export function registerDeclarativeNetRequestRule() { ], }, condition: { - regexFilter: URL_FILTER.source, + regexFilter: OLLAMA_URL_FILTER.source, initiatorDomains: [browser.runtime.id], resourceTypes: [browser.declarativeNetRequest.ResourceType.XMLHTTPREQUEST], }, diff --git a/wxt.config.ts b/wxt.config.ts index 5c356539..5b182e46 100644 --- a/wxt.config.ts +++ b/wxt.config.ts @@ -4,19 +4,27 @@ import { analyzer } from 'vite-bundle-analyzer' import { nodePolyfills } from 'vite-plugin-node-polyfills' import svgLoader from 'vite-svg-loader' import { defineConfig } from 'wxt' +import { Browser } from 'wxt/browser' import { version } from './package.json' import { EXTENSION_SHORT_NAME } from './utils/constants' +type ManifestPermissions = Browser.runtime.ManifestPermissions | (string & Record) + export const VERSION = version.split('-')[0] const IS_FIREFOX = process.argv.includes('firefox') +const IS_DEV = import.meta.env.NODE_ENV === 'development' const FIREFOX_EXTENSION_ID = 
'{48e0818d-6c94-43d4-9465-61ceb28080e3}' const ENABLE_BUNDLE_ANALYZER = process.argv.includes('--analyze') || process.env.ANALYZE === 'true' -const permissionsForChrome = ['system.memory'] -const permissionsForFirefox = ['menus'] -const extraPermissions = IS_FIREFOX ? permissionsForFirefox : permissionsForChrome +const permissionsForChrome: ManifestPermissions[] = ['system.memory'] +const permissionsForFirefox: ManifestPermissions[] = ['menus'] +const permissionsForDev: ManifestPermissions[] = ['declarativeNetRequestFeedback'] +const extraPermissions: ManifestPermissions[] = [ + ...(IS_FIREFOX ? permissionsForFirefox : permissionsForChrome), + ...(IS_DEV ? permissionsForDev : []), +] const svgLoaderPlugin = svgLoader({ svgoConfig: { @@ -101,6 +109,6 @@ export default defineConfig({ world: 'MAIN', }, ], - host_permissions: ['*://*/*'], + host_permissions: ['*://*/*', 'ws://*/*', 'wss://*/*'], }, }) From d933b9357ded047181d6bc225525de2f2f740708 Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Wed, 10 Sep 2025 10:11:46 +0800 Subject: [PATCH 02/59] chore(package): update version to 1.10.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 1e079bd7..681b5efb 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "nativemind-extension", - "version": "1.9.0-beta.2", + "version": "1.10.0-beta.0", "private": false, "author": "NativeMind", "keywords": [ From 67c8c4fd614d8b17196ca4001865c4cc35faff90 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 10 Sep 2025 02:15:22 +0000 Subject: [PATCH 03/59] chore(release): v1.10.0-beta.1 --- CHANGELOG.md | 18 ++++++++++++++++++ package.json | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47a3317a..3bf5fd16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,24 @@ # Changelog +## v1.10.0-beta.1 + +[compare 
changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.9.0-beta.2...v1.10.0-beta.1) + +### 🚀 Enhancements + +- **agent:** Add task-click-page icon and update related references ([fef0052](https://github.com/NativeMindBrowser/NativeMindExtension/commit/fef0052)) +- **lm-studio:** Initial support for LM Studio ([0f318d9](https://github.com/NativeMindBrowser/NativeMindExtension/commit/0f318d9)) + +### 🏡 Chore + +- **logo:** Update extension logo in toolbar ([7335374](https://github.com/NativeMindBrowser/NativeMindExtension/commit/7335374)) +- **package:** Update version to 1.10.0 ([d933b93](https://github.com/NativeMindBrowser/NativeMindExtension/commit/d933b93)) + +### ❤️ Contributors + +- Tony Hu ([@tonyhu-012](http://github.com/tonyhu-012)) + ## v1.9.0-beta.2 [compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.9.0-beta.1...v1.9.0-beta.2) diff --git a/package.json b/package.json index 681b5efb..931a168a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "nativemind-extension", - "version": "1.10.0-beta.0", + "version": "1.10.0-beta.1", "private": false, "author": "NativeMind", "keywords": [ From 24d96361dd01329f39e2f638ef150d59c266c504 Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Wed, 10 Sep 2025 14:29:17 +0800 Subject: [PATCH 04/59] fix(lm-studio): return consistent structured response format --- .../lm-studio/chat-language-model.ts | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/utils/llm/providers/lm-studio/chat-language-model.ts b/utils/llm/providers/lm-studio/chat-language-model.ts index 51523ef4..d437ff9c 100644 --- a/utils/llm/providers/lm-studio/chat-language-model.ts +++ b/utils/llm/providers/lm-studio/chat-language-model.ts @@ -49,6 +49,7 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { } private async getArgs({ + mode, prompt, maxTokens, temperature, @@ -57,12 +58,19 @@ export class LMStudioChatLanguageModel implements 
LanguageModelV1 { frequencyPenalty, presencePenalty, stopSequences, - responseFormat, seed, abortSignal, }: Parameters[0]) { const warnings: LanguageModelV1CallWarning[] = [] + if (mode?.type === 'object-tool') { + warnings.push({ + type: 'unsupported-setting', + setting: 'responseFormat', + details: 'object-tool mode is not supported by LM Studio models, using object-json mode instead', + }) + } + const baseArgs = { signal: abortSignal, // standardized settings: @@ -72,14 +80,12 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { topK, frequencyPenalty, presencePenalty, - response_format: - responseFormat?.type === 'json' - ? responseFormat.schema != null - ? { - type: 'json' as const, - schema: JSON.stringify(responseFormat.schema), - } - : undefined + structured: + mode?.type === 'object-json' + ? { + type: 'json' as const, + jsonSchema: mode.schema, + } : undefined, stop: stopSequences, @@ -99,7 +105,7 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { const body = args - log.debug('doGenerate called', { args }) + log.debug('doGenerate called', { args, options }) const responseBody = await this.model.respond(args.messages, { signal: body.signal, @@ -108,6 +114,8 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { topKSampling: body.topK, temperature: body.temperature, stopStrings: body.stop, + contextOverflowPolicy: 'truncateMiddle', + structured: body.structured, }) const { messages: rawPrompt, ...rawSettings } = args @@ -130,7 +138,7 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { const { args, warnings } = await this.getArgs({ ...options }) const body = args - log.debug('doStream called', { args }) + log.debug('doStream called', { args, options }) const resp = this.model.respond(args.messages, { signal: body.signal, maxTokens: body.maxTokens, @@ -138,6 +146,8 @@ export class LMStudioChatLanguageModel implements LanguageModelV1 { topKSampling: body.topK, temperature: 
body.temperature, stopStrings: body.stop, + contextOverflowPolicy: 'truncateMiddle', + structured: body.structured, }) const { messages: rawPrompt, ...rawSettings } = args From 170af851909209af3bb2a09454722e40146ee8f4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 10 Sep 2025 06:32:50 +0000 Subject: [PATCH 05/59] chore(release): v1.10.0-beta.2 --- CHANGELOG.md | 12 ++++++++++++ package.json | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3bf5fd16..91745844 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,18 @@ # Changelog +## v1.10.0-beta.2 + +[compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.10.0-beta.1...v1.10.0-beta.2) + +### 🩹 Fixes + +- **lm-studio:** Return consistent structured response format ([24d9636](https://github.com/NativeMindBrowser/NativeMindExtension/commit/24d9636)) + +### ❤️ Contributors + +- Tony Hu ([@tonyhu-012](http://github.com/tonyhu-012)) + ## v1.10.0-beta.1 [compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.9.0-beta.2...v1.10.0-beta.1) diff --git a/package.json b/package.json index 931a168a..5083c3ef 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "nativemind-extension", - "version": "1.10.0-beta.1", + "version": "1.10.0-beta.2", "private": false, "author": "NativeMind", "keywords": [ From e8793b7fdb08a055516656dcebaaafdd1d239f4a Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Thu, 11 Sep 2025 16:42:38 +0800 Subject: [PATCH 06/59] feat(tests): add onboarding and settings tests, enhance mock responses --- package.json | 2 +- tests/e2e/mock-responses/ollama/ps.ts | 4 +++ tests/e2e/mock-responses/ollama/show.ts | 20 +++++++++++++ tests/e2e/mock-responses/ollama/tags.ts | 4 +++ tests/e2e/onboarding.test.ts | 37 +++++++++++++++++++++++ tests/e2e/settings.test.ts | 23 ++++++++++++++ tests/e2e/utils.ts | 40 ++++++++++++++++++++----- utils/i18n/index.ts | 29 +++++++++--------- 
utils/rpc/utils.ts | 7 +++-- utils/user-config/index.ts | 4 +-- 10 files changed, 141 insertions(+), 29 deletions(-) create mode 100644 tests/e2e/mock-responses/ollama/show.ts create mode 100644 tests/e2e/onboarding.test.ts create mode 100644 tests/e2e/settings.test.ts diff --git a/package.json b/package.json index 5083c3ef..e3451885 100644 --- a/package.json +++ b/package.json @@ -36,7 +36,7 @@ "lint": "eslint", "lint:fix": "eslint --fix", "compile": "vue-tsc --noEmit", - "test:e2e": "playwright test --config=playwright.config.ts", + "test:e2e": "PW_EXPERIMENTAL_SERVICE_WORKER_NETWORK_EVENTS=1 playwright test --config=playwright.config.ts", "test:unit": "vitest run", "dev": "NODE_ENV=development wxt", "dev:edge": "NODE_ENV=development wxt -b edge", diff --git a/tests/e2e/mock-responses/ollama/ps.ts b/tests/e2e/mock-responses/ollama/ps.ts index a6ac2fda..69cdfb41 100644 --- a/tests/e2e/mock-responses/ollama/ps.ts +++ b/tests/e2e/mock-responses/ollama/ps.ts @@ -20,3 +20,7 @@ export const ollamaPsResponse = { }, ], } + +export const ollamaPsEmptyResponse = { + models: [], +} diff --git a/tests/e2e/mock-responses/ollama/show.ts b/tests/e2e/mock-responses/ollama/show.ts new file mode 100644 index 00000000..c0a03092 --- /dev/null +++ b/tests/e2e/mock-responses/ollama/show.ts @@ -0,0 +1,20 @@ +export const ollamaShowResponse = { + license: 'xxx', + modelfile: 'xxx', + parameters: 'xxx', + template: 'xxx', + details: { + parent_model: '', + format: 'gguf', + family: 'gemma3', + families: [ + 'gemma3', + ], + parameter_size: '12.2B', + quantization_level: 'Q4_K_M', + }, + capabilities: [ + 'completion', + ], + modified_at: '2025-08-29T15:36:31.295987698+08:00', +} diff --git a/tests/e2e/mock-responses/ollama/tags.ts b/tests/e2e/mock-responses/ollama/tags.ts index d75a3d82..808fe875 100644 --- a/tests/e2e/mock-responses/ollama/tags.ts +++ b/tests/e2e/mock-responses/ollama/tags.ts @@ -155,3 +155,7 @@ export const ollamaTagsResponse = { }, ], } + +export const 
ollamaTagsEmptyResponse = { + models: [], +} diff --git a/tests/e2e/onboarding.test.ts b/tests/e2e/onboarding.test.ts new file mode 100644 index 00000000..43faa2e8 --- /dev/null +++ b/tests/e2e/onboarding.test.ts @@ -0,0 +1,37 @@ +import { ollamaPsEmptyResponse } from './mock-responses/ollama/ps' +import { ollamaShowResponse } from './mock-responses/ollama/show' +import { ollamaTagsEmptyResponse } from './mock-responses/ollama/tags' +import { expect, test } from './utils' + +test('show download model tutorial if ollama/lm studio is running', async ({ page, extensionId, context }) => { + context.route('http://localhost:11434/', async (route) => { + await route.fulfill({ body: 'Ollama is running', contentType: 'plain/text' }) + }) + context.route(/http:\/\/localhost:1234/, async (route) => { + await route.abort('connectionfailed') + }) + context.route(/\/api\/ps/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaPsEmptyResponse), contentType: 'application/json' }) + }) + context.route(/\/api\/tags/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaTagsEmptyResponse), contentType: 'application/json' }) + }) + context.route(/\/api\/show/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaShowResponse), contentType: 'application/json' }) + }) + await page.goto('chrome-extension://' + extensionId + '/sidepanel.html') + // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types + await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' })) + await expect(page.getByText('Download a model to begin').first()).toBeVisible({ timeout: 15000 }) +}) + +test('show startup tutorial if ollama is not running', async ({ page, extensionId, context, extension }) => { + context.route(/http:\/\/localhost:11434/, async (route) => { + await route.abort('connectionfailed') + }) + await extension.setStorageItem('llm.backends.lmStudio.baseUrl', 'ws://localhost:12345') // set to a 
non-existing ws url to avoid lm studio connection + await page.goto('chrome-extension://' + extensionId + '/sidepanel.html') + // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types + await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' })) + await expect(page.getByText('How do you want to run AI locally').first()).toBeVisible({ timeout: 15000 }) +}) diff --git a/tests/e2e/settings.test.ts b/tests/e2e/settings.test.ts new file mode 100644 index 00000000..3b9d2052 --- /dev/null +++ b/tests/e2e/settings.test.ts @@ -0,0 +1,23 @@ +import { ollamaPsResponse } from './mock-responses/ollama/ps' +import { ollamaShowResponse } from './mock-responses/ollama/show' +import { ollamaTagsResponse } from './mock-responses/ollama/tags' +import { expect, test } from './utils' + +test('should load extension settings', async ({ page, extensionId, context }) => { + context.route('http://localhost:11434/', async (route) => { + await route.fulfill({ body: 'Ollama is running', contentType: 'plain/text' }) + }) + context.route(/\/api\/ps/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaPsResponse), contentType: 'application/json' }) + }) + context.route(/\/api\/tags/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaTagsResponse), contentType: 'application/json' }) + }) + context.route(/\/api\/show/, async (route) => { + await route.fulfill({ body: JSON.stringify(ollamaShowResponse), contentType: 'application/json' }) + }) + await page.goto('chrome-extension://' + extensionId + '/settings.html') + // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types + await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' })) + await expect(page.getByText('Running Models (1)').first()).toBeVisible({ timeout: 15000 }) +}) diff --git a/tests/e2e/utils.ts b/tests/e2e/utils.ts index 9e3bb9cd..39677d53 100644 --- a/tests/e2e/utils.ts +++ b/tests/e2e/utils.ts 
@@ -1,7 +1,9 @@ -import { type BrowserContext, chromium, Page, test as base } from '@playwright/test' +import { type BrowserContext, chromium, Page, test as base, Worker } from '@playwright/test' import fs from 'fs' import path from 'path' +import { sleep } from '@/utils/sleep' + import { ollamaPsResponse } from './mock-responses/ollama/ps' import { ollamaTagsResponse } from './mock-responses/ollama/tags' @@ -10,7 +12,9 @@ type Extended = { extensionId: string extension: { activateActiveTab: (() => Promise) + setStorageItem: (key: string, value: string | number | boolean) => Promise } + extensionBackground: Worker } const ALLOWED_ENV = ['production', 'beta', 'development'] as const @@ -29,10 +33,25 @@ if (fs.existsSync(pathToExtension) === false) { throw new Error(`Extension path does not exist: ${pathToExtension}. Please build the extension first.`) } +async function waitForServiceWorker(context: BrowserContext): Promise { + let [background] = context.serviceWorkers() + if (!background) { + background = await context.waitForEvent('serviceworker') + } + + // @ts-expect-error - self.registration is not recognized in types + while (await background.evaluate(() => self.registration?.active?.state) !== 'activated') { + await sleep(300) + } + + return background +} + export const test = base.extend({ - context: async ({ context: _ }, use) => { + context: async ({ context: _, locale }, use) => { const context = await chromium.launchPersistentContext('', { channel: 'chromium', + locale: locale || 'en-US', args: [ `--disable-extensions-except=${pathToExtension}`, `--load-extension=${pathToExtension}`, @@ -42,17 +61,13 @@ export const test = base.extend({ await context.close() }, extensionId: async ({ context }, use) => { - let [background] = context.serviceWorkers() - if (!background) - background = await context.waitForEvent('serviceworker') + const background = await waitForServiceWorker(context) const extensionId = background.url().split('/')[2] await use(extensionId) 
}, extension: async ({ context }, use) => { - let [background] = context.serviceWorkers() - if (!background) - background = await context.waitForEvent('serviceworker') + const background = await waitForServiceWorker(context) use({ activateActiveTab: async () => await background.evaluate(() => { @@ -62,9 +77,18 @@ export const test = base.extend({ chrome.action.onClicked.dispatch(tabs[0]) }) }), + setStorageItem: async (key: string, value: string | number | boolean) => + await background.evaluate( + ([k, v]) => { + // @ts-expect-error - this is a Chrome extension API + chrome.storage.local.set({ [k]: v }) + }, + [key, value], + ), }) }, }) + export const expect = test.expect interface OllamaMockOptions { diff --git a/utils/i18n/index.ts b/utils/i18n/index.ts index b4448fa5..24d7d5db 100644 --- a/utils/i18n/index.ts +++ b/utils/i18n/index.ts @@ -1,3 +1,4 @@ +import { get as getValueOfPath } from 'es-toolkit/compat' import { watch } from 'vue' import type { ComposerTranslation as OriginComposerTranslation } from 'vue-i18n' import { createI18n, useI18n as _useI18n } from 'vue-i18n' @@ -26,6 +27,7 @@ import { SUPPORTED_LOCALES, SupportedLocaleCode } from './constants' // Type-define 'en-US' as the master schema for the resource type MessageSchema = typeof en export type TranslationKey = JsonPaths +export type ComposerTranslation = OriginComposerTranslation const messages = { en, @@ -97,6 +99,7 @@ export const useI18n = only(['content', 'popup', 'sidepanel', 'settings'], () => return { ...i18n, formatDuration: (seconds: number) => formatDuration(i18n.t, seconds), + getAllLocaleValues: (key: TranslationKey) => getAllLocaleValues(i18n, key), } } }) @@ -104,27 +107,25 @@ export const useI18n = only(['content', 'popup', 'sidepanel', 'settings'], () => // this i18n function can be used in any context, including outside Vue components and background scripts, but it's a async function export async function useGlobalI18n() { const i18n = await createI18nInstance() - const 
composer = i18n.global as unknown as ReturnType + const composer = i18n.global as unknown as ReturnType> return { ...composer, formatDuration: (seconds: number) => formatDuration(composer.t, seconds), + getAllLocaleValues: (key: TranslationKey) => getAllLocaleValues(composer, key), } } -export type ComposerTranslation = OriginComposerTranslation - -export async function getAllLocaleValues(key: string) { - const i18n = await createI18nInstance() - const result: string[] = [] +async function getAllLocaleValues(instance: ReturnType>, key: TranslationKey) { + const result: { locale: SupportedLocaleCode, message: string }[] = [] - for (const locale of i18n.global.availableLocales) { - const messages = i18n.global.getLocaleMessage(locale) // Get the complete message object for this locale - // Support nested keys, e.g., "home.title" - // FIXME - // eslint-disable-next-line @typescript-eslint/no-explicit-any - const value = key.split('.').reduce((obj, k) => obj?.[k], messages as any) - if (value !== undefined) { - result.push(value) + for (const locale of instance.availableLocales) { + const messages = instance.getLocaleMessage(locale) // Get the complete message object for this locale + const value = getValueOfPath(messages, key) + if (typeof value === 'string') { + result.push({ + locale, + message: value, + }) } } diff --git a/utils/rpc/utils.ts b/utils/rpc/utils.ts index aa5f48c4..7c615764 100644 --- a/utils/rpc/utils.ts +++ b/utils/rpc/utils.ts @@ -2,7 +2,7 @@ import { Browser, browser } from 'wxt/browser' import { AssistantMessageV1, ChatHistoryV1, UserMessageV1 } from '@/types/chat' -import { getAllLocaleValues } from '../i18n' +import { useGlobalI18n } from '../i18n' export function preparePortConnection(portName: string) { return new Promise((resolve, reject) => { @@ -83,8 +83,9 @@ export async function shouldGenerateChatTitle(chatHistory: ChatHistoryV1): Promi const assistantMessages = completedMessages.filter((item) => item.role === 'assistant') // Check if 
title is likely a default "New Chat" title in various languages - const defaultTitles = await getAllLocaleValues('chat_history.new_chat') - const isDefaultTitle = defaultTitles.some((title) => chatHistory.title === title) + const i18n = await useGlobalI18n() + const defaultTitles = await i18n.getAllLocaleValues('chat_history.new_chat') + const isDefaultTitle = defaultTitles.some((title) => chatHistory.title === title.message) // Should generate title when: // 1. Chat title is still the default "New Chat" title diff --git a/utils/user-config/index.ts b/utils/user-config/index.ts index 48eaf30e..09369d5e 100644 --- a/utils/user-config/index.ts +++ b/utils/user-config/index.ts @@ -68,9 +68,7 @@ export async function _getUserConfig() { return { locale: { - current: await new Config( - 'locale.current', - ).build(), + current: await new Config('locale.current').build(), }, llm: { defaultFirstTokenTimeout: await new Config('llm.firstTokenTimeout').default(60 * 1000).build(), // 60 seconds From a60f489a025e67a3d81efed2c681a8c68dd4d199 Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Fri, 12 Sep 2025 17:12:33 +0800 Subject: [PATCH 07/59] feat(chat): add toggle to enable online search and its settings - Implemented migration for old chat history records to include online search settings. - Created OnlineSearchSwitch component for toggling online search. - Updated ThinkingModeSwitch component for improved styling and functionality. - Integrated OnlineSearchSwitch into the Chat component toolbar. - Enhanced Chat class to manage online search settings in user configuration. - Added checks in tool calls to disable online search if not enabled in settings. - Updated localization files to include translations for online search tool. - Modified user configuration to support reactive default values for online search settings. 
--- assets/icons/online-search.svg | 16 +++ components/ModelSelector.vue | 22 +++- components/Selector.vue | 3 +- entrypoints/background/database/types.ts | 1 + .../services/chat-history-service.ts | 22 ++-- .../components/Chat/OnlineSearchSwitch.vue | 49 ++++++++ .../components/Chat/ThinkingModeSwitch.vue | 57 +++------ .../sidepanel/components/Chat/index.vue | 118 +++++++++--------- entrypoints/sidepanel/utils/chat/chat.ts | 21 ++-- .../sidepanel/utils/chat/tool-calls/index.ts | 12 ++ locales/de.json | 5 + locales/en.json | 5 + locales/es.json | 5 + locales/fr.json | 5 + locales/id.json | 5 + locales/ja.json | 5 + locales/ko.json | 5 + locales/pt.json | 5 + locales/ru.json | 5 + locales/th.json | 5 + locales/vi.json | 5 + locales/zh-CN.json | 5 + locales/zh-TW.json | 5 + types/chat.ts | 1 + utils/llm/tools/prompt-based/tools.ts | 30 +++-- utils/pinia-store/store.ts | 17 +++ utils/user-config/defaults.ts | 77 ++++-------- utils/user-config/helpers.test.ts | 14 +++ utils/user-config/helpers.ts | 63 ++++------ utils/user-config/index.ts | 20 ++- 30 files changed, 377 insertions(+), 231 deletions(-) create mode 100644 assets/icons/online-search.svg create mode 100644 entrypoints/sidepanel/components/Chat/OnlineSearchSwitch.vue diff --git a/assets/icons/online-search.svg b/assets/icons/online-search.svg new file mode 100644 index 00000000..2bf93d80 --- /dev/null +++ b/assets/icons/online-search.svg @@ -0,0 +1,16 @@ + + + + + + + + + + + + \ No newline at end of file diff --git a/components/ModelSelector.vue b/components/ModelSelector.vue index 9ca65fa8..9af8bde6 100644 --- a/components/ModelSelector.vue +++ b/components/ModelSelector.vue @@ -44,6 +44,9 @@ {{ option.label }} +
+ +
⚠️ No model
@@ -52,7 +55,13 @@
- +
+ +
+ {{ option?.label || t('settings.models.no_model') }} @@ -131,6 +140,7 @@ import { getUserConfig } from '@/utils/user-config' import { classNames } from '@/utils/vue/utils' import ExhaustiveError from './ExhaustiveError.vue' +import Loading from './Loading.vue' import Selector from './Selector.vue' import Button from './ui/Button.vue' import Divider from './ui/Divider.vue' @@ -150,8 +160,8 @@ const props = withDefaults(defineProps<{ }) const { t } = useI18n() -const { modelList: composedModelList } = toRefs(useLLMBackendStatusStore()) -const { updateModelList, updateOllamaModelList, updateLMStudioModelList } = useLLMBackendStatusStore() +const { modelList: composedModelList, modelListUpdating } = toRefs(useLLMBackendStatusStore()) +const { updateModelList } = useLLMBackendStatusStore() only(['sidepanel'], () => { const removeListener = registerSidepanelRpcEvent('updateModelList', async () => await updateModelList()) @@ -240,7 +250,8 @@ const onClick = () => { } } -watch(modelList, (modelList) => { +watch([modelList, modelListUpdating], ([modelList, updating]) => { + if (updating) return if (modelList.length === 0) { commonModel.value = undefined translationModel.value = undefined @@ -262,8 +273,7 @@ watch([endpointType, selectedModel], async () => { updateModelList() }) -watch(ollamaBaseUrl, async () => updateOllamaModelList()) -watch(lmStudioBaseUrl, async () => updateLMStudioModelList()) +watch([ollamaBaseUrl, lmStudioBaseUrl], async () => updateModelList()) onMounted(async () => { updateModelList() diff --git a/components/Selector.vue b/components/Selector.vue index a9d0b6c3..84d57289 100644 --- a/components/Selector.vue +++ b/components/Selector.vue @@ -6,7 +6,7 @@
@@ -132,6 +132,7 @@ import { Component, computed, FunctionalComponent, Ref, ref, watch, watchEffect import { useInjectContext } from '@/composables/useInjectContext' import { useZIndex } from '@/composables/useZIndex' +import { classNames } from '@/utils/vue/utils' import ScrollContainer from './ScrollContainer.vue' import Button from './ui/Button.vue' diff --git a/entrypoints/background/database/types.ts b/entrypoints/background/database/types.ts index 3143c102..8caef6cb 100644 --- a/entrypoints/background/database/types.ts +++ b/entrypoints/background/database/types.ts @@ -10,6 +10,7 @@ export interface ChatHistoryRecord { history: string // JSON serialized HistoryItemV1[] contextUpdateInfo?: string // JSON serialized contextUpdateInfo from ChatHistoryV1 reasoningEnabled?: boolean // reasoning setting for this chat + onlineSearchEnabled?: boolean // online search setting for this chat, default is true createdAt: number updatedAt: number } diff --git a/entrypoints/background/services/chat-history-service.ts b/entrypoints/background/services/chat-history-service.ts index 9d82d636..70ee8faa 100644 --- a/entrypoints/background/services/chat-history-service.ts +++ b/entrypoints/background/services/chat-history-service.ts @@ -64,6 +64,18 @@ export class BackgroundChatHistoryService { } } + migrateFromOldHistoryRecord(record: ChatHistoryRecord): ChatHistoryV1 { + return { + id: record.id, + title: record.title, + lastInteractedAt: record.lastInteractedAt, + contextUpdateInfo: record.contextUpdateInfo ? JSON.parse(record.contextUpdateInfo) : undefined, + reasoningEnabled: record.reasoningEnabled, + onlineSearchEnabled: record.onlineSearchEnabled ?? 
true, // default to true if undefined for backward compatibility + history: JSON.parse(record.history) as HistoryItemV1[], + } + } + /** * Get chat history by ID */ @@ -75,14 +87,7 @@ export class BackgroundChatHistoryService { const record = await db.get(CHAT_OBJECT_STORES.CHAT_HISTORY, chatId) if (!record) return null - return { - id: record.id, - title: record.title, - lastInteractedAt: record.lastInteractedAt, - contextUpdateInfo: record.contextUpdateInfo ? JSON.parse(record.contextUpdateInfo) : undefined, - reasoningEnabled: record.reasoningEnabled, - history: JSON.parse(record.history) as HistoryItemV1[], - } + return this.migrateFromOldHistoryRecord(record) } catch (error) { log.error('Failed to get chat history:', error) @@ -108,6 +113,7 @@ export class BackgroundChatHistoryService { history: JSON.stringify(chatHistory.history), contextUpdateInfo: chatHistory.contextUpdateInfo ? JSON.stringify(chatHistory.contextUpdateInfo) : undefined, reasoningEnabled: chatHistory.reasoningEnabled, + onlineSearchEnabled: chatHistory.onlineSearchEnabled, createdAt: now, // Will be overwritten if record exists updatedAt: now, } diff --git a/entrypoints/sidepanel/components/Chat/OnlineSearchSwitch.vue b/entrypoints/sidepanel/components/Chat/OnlineSearchSwitch.vue new file mode 100644 index 00000000..adc612bf --- /dev/null +++ b/entrypoints/sidepanel/components/Chat/OnlineSearchSwitch.vue @@ -0,0 +1,49 @@ + + + diff --git a/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue b/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue index 469fa837..df5256a8 100644 --- a/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue +++ b/entrypoints/sidepanel/components/Chat/ThinkingModeSwitch.vue @@ -1,38 +1,27 @@ @@ -147,18 +136,4 @@ watch([endpointType, currentModel], async () => { onMounted(async () => { await updateModelList() }) - -// Debug logging for development, temporarily keep -// watch([currentModel, endpointType, isModelSupportsThinking, 
isThinkingToggleable, isThinkingEnabled], -// ([model, endpoint, supportsThinking, toggleable, enabled]) => { -// logger.debug('ThinkingModeSwitch state:', { -// currentModel: model, -// endpointType: endpoint, -// isModelSupportsThinking: supportsThinking, -// isThinkingToggleable: toggleable, -// isThinkingEnabled: enabled, -// }) -// }, -// { immediate: true }, -// ) diff --git a/entrypoints/sidepanel/components/Chat/index.vue b/entrypoints/sidepanel/components/Chat/index.vue index 9a6d7222..5979d3d6 100644 --- a/entrypoints/sidepanel/components/Chat/index.vue +++ b/entrypoints/sidepanel/components/Chat/index.vue @@ -59,66 +59,67 @@ v-model:attachmentStorage="contextAttachmentStorage" />
-
- -
- -
-
- -
-
- -
- -
-
+
+ +
+
+ - -
+ + +
+
+ + +
+
- - + + +
@@ -156,6 +157,7 @@ import MessageAction from './Messages/Action.vue' import MessageTaskGroup from './Messages/AgentTaskGroup.vue' import MessageAssistant from './Messages/Assistant.vue' import MessageTask from './Messages/Task.vue' +import OnlineSearchSwitch from './OnlineSearchSwitch.vue' import ThinkingModeSwitch from './ThinkingModeSwitch.vue' const inputContainerRef = ref() diff --git a/entrypoints/sidepanel/utils/chat/chat.ts b/entrypoints/sidepanel/utils/chat/chat.ts index 28370297..587f9abf 100644 --- a/entrypoints/sidepanel/utils/chat/chat.ts +++ b/entrypoints/sidepanel/utils/chat/chat.ts @@ -314,12 +314,11 @@ export class Chat { title: defaultTitle, lastInteractedAt: Date.now(), reasoningEnabled: true, // Default to true for new chats + onlineSearchEnabled: true, // Default to true for new chats }) - // If this is a new chat (no existing history), set global reasoning to true - if (!existingChatHistory) { - userConfig.llm.reasoning.set(true) - } + userConfig.llm.reasoning.set(chatHistory.value.reasoningEnabled ?? true) + userConfig.chat.onlineSearch.enable.set(chatHistory.value.onlineSearchEnabled ?? true) const contextAttachments = ref(await s2bRpc.getContextAttachments(chatHistoryId.value) ?? { attachments: [], id: chatHistoryId.value, lastInteractedAt: Date.now() }) const chatList = ref([]) const updateChatList = async () => { @@ -360,11 +359,12 @@ export class Chat { watch(chatHistoryId, async (newId, oldId) => { if (newId === oldId) return - log.debug('[Chat] Switching to chat:', newId) + log.debug('Switching to chat:', newId) instance.stop() // Load the new chat data const existingNewChatHistory = await s2bRpc.getChatHistory(newId) + log.debug('Loaded chat history for new chat ID:', newId, existingNewChatHistory) const newChatHistory: ChatHistoryV1 = existingNewChatHistory ?? 
{ history: [], id: newId, @@ -372,12 +372,9 @@ export class Chat { lastInteractedAt: Date.now(), contextUpdateInfo: undefined, reasoningEnabled: true, // Default to true for new chats + onlineSearchEnabled: true, // Default to true for new chats } - // If this is a new chat (no existing history), set global reasoning to true - if (!existingNewChatHistory) { - userConfig.llm.reasoning.set(true) - } const newContextAttachments: ContextAttachmentStorage = await s2bRpc.getContextAttachments(newId) ?? { attachments: [], id: newId, @@ -388,10 +385,8 @@ export class Chat { Object.assign(chatHistory.value, newChatHistory) Object.assign(contextAttachments.value, newContextAttachments) - // Restore the reasoning setting for this chat - if (newChatHistory.reasoningEnabled !== undefined) { - userConfig.llm.reasoning.set(newChatHistory.reasoningEnabled) - } + userConfig.llm.reasoning.set(newChatHistory.reasoningEnabled ?? true) + userConfig.chat.onlineSearch.enable.set(newChatHistory.onlineSearchEnabled ?? 
true) // Clean up any loading messages instance.historyManager.cleanupLoadingMessages() diff --git a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts index ce498e70..b977dc14 100644 --- a/entrypoints/sidepanel/utils/chat/tool-calls/index.ts +++ b/entrypoints/sidepanel/utils/chat/tool-calls/index.ts @@ -21,6 +21,18 @@ const logger = Logger.child('tool-calls-execute') export const executeSearchOnline: AgentToolCallExecute<'search_online'> = async ({ params, abortSignal, taskMessageModifier }) => { const { t } = await useGlobalI18n() + const userConfig = await getUserConfig() + const enableOnlineSearch = userConfig.chat.onlineSearch.enable.get() + if (!enableOnlineSearch) { + return [{ + type: 'tool-result', + results: { + query: params.query, + status: 'failed', + error_message: 'Online search is disabled in settings', + }, + }] + } const log = logger.child('tool:executeSearchOnline') const HARD_MAX_RESULTS = 10 const { query, max_results } = params diff --git a/locales/de.json b/locales/de.json index 609e4843..5be0cdc2 100644 --- a/locales/de.json +++ b/locales/de.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Versuchen Sie, auf einen Seitenlink zu klicken, bevor Sie Seiten anzeigen", "error_click_incorrect_link": "Versuchen Sie, auf den fehlerhaften Seitenlink zu klicken" } + }, + "tools": { + "online_search": { + "title": "Suchen" + } } }, "chat_history": { diff --git a/locales/en.json b/locales/en.json index 554bcf75..3a017642 100644 --- a/locales/en.json +++ b/locales/en.json @@ -281,6 +281,11 @@ "error_click_before_view_tab": "Try clicking a page link before viewing any pages", "error_click_incorrect_link": "Try clicking on the incorrect page link" } + }, + "tools": { + "online_search": { + "title": "Search" + } } }, "chat_history": { diff --git a/locales/es.json b/locales/es.json index e303285b..71358f72 100644 --- a/locales/es.json +++ b/locales/es.json @@ -77,6 +77,11 @@ 
"error_click_before_view_tab": "Intenta hacer clic en un enlace de página antes de ver cualquier página", "error_click_incorrect_link": "Intenta hacer clic en el enlace de página incorrecto" } + }, + "tools": { + "online_search": { + "title": "Buscar" + } } }, "chat_history": { diff --git a/locales/fr.json b/locales/fr.json index 0977dbb8..3ee091c2 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Essayez de cliquer sur un lien de page avant de consulter des pages", "error_click_incorrect_link": "Essayez de cliquer sur le lien de page incorrect" } + }, + "tools": { + "online_search": { + "title": "Recherche" + } } }, "chat_history": { diff --git a/locales/id.json b/locales/id.json index f84dc95e..0e4c2098 100644 --- a/locales/id.json +++ b/locales/id.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Coba klik tautan halaman sebelum melihat halaman apa pun", "error_click_incorrect_link": "Coba klik pada tautan halaman yang salah" } + }, + "tools": { + "online_search": { + "title": "Cari" + } } }, "chat_history": { diff --git a/locales/ja.json b/locales/ja.json index 33a400af..affc2983 100644 --- a/locales/ja.json +++ b/locales/ja.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "ページを表示する前に、ページリンクをクリックしてみてください", "error_click_incorrect_link": "間違ったページリンクをクリックしてみてください" } + }, + "tools": { + "online_search": { + "title": "検索" + } } }, "chat_history": { diff --git a/locales/ko.json b/locales/ko.json index 3b5e997d..0ec87ec8 100644 --- a/locales/ko.json +++ b/locales/ko.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "페이지를 보기 전에 페이지 링크를 클릭해 보세요", "error_click_incorrect_link": "잘못된 페이지 링크를 클릭해 보세요" } + }, + "tools": { + "online_search": { + "title": "검색" + } } }, "chat_history": { diff --git a/locales/pt.json b/locales/pt.json index dd4e4e95..11f679d7 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Tente clicar em um link de página antes de 
visualizar qualquer página", "error_click_incorrect_link": "Tente clicar no link da página incorreta" } + }, + "tools": { + "online_search": { + "title": "Pesquisar" + } } }, "chat_history": { diff --git a/locales/ru.json b/locales/ru.json index c5dd560a..16d6ce31 100644 --- a/locales/ru.json +++ b/locales/ru.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Попробуйте нажать на ссылку страницы перед просмотром любых страниц", "error_click_incorrect_link": "Попробуйте нажать на неправильную ссылку страницы" } + }, + "tools": { + "online_search": { + "title": "Поиск" + } } }, "chat_history": { diff --git a/locales/th.json b/locales/th.json index 3442f27f..d72486af 100644 --- a/locales/th.json +++ b/locales/th.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "ลองคลิกลิงก์หน้าเว็บก่อนดูหน้าใดๆ", "error_click_incorrect_link": "ลองคลิกที่ลิงก์หน้าที่ไม่ถูกต้อง" } + }, + "tools": { + "online_search": { + "title": "ค้นหา" + } } }, "chat_history": { diff --git a/locales/vi.json b/locales/vi.json index 94b915c7..06261216 100644 --- a/locales/vi.json +++ b/locales/vi.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "Hãy thử nhấp vào một liên kết trang trước khi xem bất kỳ trang nào", "error_click_incorrect_link": "Hãy thử nhấp vào liên kết trang không chính xác" } + }, + "tools": { + "online_search": { + "title": "Tìm kiếm" + } } }, "chat_history": { diff --git a/locales/zh-CN.json b/locales/zh-CN.json index 4dd460cd..a306bc50 100644 --- a/locales/zh-CN.json +++ b/locales/zh-CN.json @@ -281,6 +281,11 @@ "error_click_before_view_tab": "尝试在尚未查看任何页面时点击页面链接", "error_click_incorrect_link": "尝试点击错误的页面链接" } + }, + "tools": { + "online_search": { + "title": "搜索" + } } }, "chat_history": { diff --git a/locales/zh-TW.json b/locales/zh-TW.json index ed446a26..67524005 100644 --- a/locales/zh-TW.json +++ b/locales/zh-TW.json @@ -77,6 +77,11 @@ "error_click_before_view_tab": "在查看任何頁面之前,請先嘗試點擊頁面連結", "error_click_incorrect_link": "請嘗試點擊錯誤的頁面連結" } + }, + "tools": { + 
"online_search": { + "title": "搜尋" + } } }, "chat_history": { diff --git a/types/chat.ts b/types/chat.ts index 693b69f7..60835840 100644 --- a/types/chat.ts +++ b/types/chat.ts @@ -181,6 +181,7 @@ export type ChatHistoryV1 = { lastAttachmentIds: string[] } reasoningEnabled?: boolean // reasoning setting for this chat + onlineSearchEnabled: boolean // online search setting for this chat, default is true history: HistoryItemV1[] } diff --git a/utils/llm/tools/prompt-based/tools.ts b/utils/llm/tools/prompt-based/tools.ts index 0c632eb0..536ff56b 100644 --- a/utils/llm/tools/prompt-based/tools.ts +++ b/utils/llm/tools/prompt-based/tools.ts @@ -2,7 +2,11 @@ import { z } from 'zod' import { InferredParams, PromptBasedTool } from './helpers' -export const viewTabTool = new PromptBasedTool('view_tab', 'View complete content of a specific tab with interactive elements as clickable IDs', { +export const viewTabTool = new PromptBasedTool('view_tab', 'View complete content of a specific tab', { + tab_id: z.string().min(1).describe(''), +}) + +export const viewTabForWithInteractiveElementsTool = new PromptBasedTool('view_tab', 'View complete content of a specific tab with interactive elements as clickable IDs', { tab_id: z.string().min(1).describe(''), }) @@ -19,7 +23,11 @@ export const searchOnlineTool = new PromptBasedTool('search_online', 'Search for max_results: z.coerce.number().min(1).max(20).default(5).describe('5'), }) -export const fetchPageTool = new PromptBasedTool('fetch_page', 'Get complete content from a specific web page with interactive elements as clickable IDs', { +export const fetchPageTool = new PromptBasedTool('fetch_page', 'Get detailed content from specific web pages', { + url: z.string().url().describe(''), +}) + +export const fetchPageWithInteractiveElementsTool = new PromptBasedTool('fetch_page', 'Get complete content from a specific web page with interactive elements as clickable IDs', { url: z.string().url().describe(''), }) @@ -40,14 +48,16 @@ 
export const promptBasedTools = [ pageClickTool, ] -export const chatDefaultPromptBasedTools = [ - viewTabTool, - viewPdfTool, - viewImageTool, - searchOnlineTool, - fetchPageTool, - pageClickTool, -] +export const promptBasedToolCollections = { + browserUse: { + onlineSearch: [viewTabForWithInteractiveElementsTool, viewPdfTool, viewImageTool, searchOnlineTool, fetchPageWithInteractiveElementsTool, pageClickTool], + nonOnlineSearch: [viewTabForWithInteractiveElementsTool, viewPdfTool, viewImageTool, fetchPageWithInteractiveElementsTool, pageClickTool], + }, + nonBrowserUse: { + onlineSearch: [viewTabTool, viewPdfTool, viewImageTool, searchOnlineTool, fetchPageTool], + nonOnlineSearch: [viewTabTool, viewPdfTool, viewImageTool, fetchPageTool], + }, +} export type PromptBasedToolType = typeof promptBasedTools[number] export type ExtractToolWithParams = { diff --git a/utils/pinia-store/store.ts b/utils/pinia-store/store.ts index 523dd69c..d4690431 100644 --- a/utils/pinia-store/store.ts +++ b/utils/pinia-store/store.ts @@ -21,9 +21,11 @@ const rpc = forRuntimes({ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => { // Ollama model list and connection status const ollamaModelList = ref([]) + const ollamaModelListUpdating = ref(false) const ollamaConnectionStatus = ref<'connected' | 'error' | 'unconnected'>('unconnected') const updateOllamaModelList = async (): Promise => { try { + ollamaModelListUpdating.value = true const response = await rpc.getOllamaLocalModelList() ollamaConnectionStatus.value = 'connected' log.debug('Model list fetched:', response) @@ -44,6 +46,9 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => ollamaConnectionStatus.value = 'error' return [] } + finally { + ollamaModelListUpdating.value = false + } } const clearOllamaModelList = () => { ollamaModelList.value = [] @@ -69,8 +74,10 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => // LMStudio model list 
and connection status const lmStudioModelList = ref([]) + const lmStudioModelListUpdating = ref(false) const updateLMStudioModelList = async (): Promise => { try { + lmStudioModelListUpdating.value = true const response = await rpc.getLMStudioModelList() const runningModels = await rpc.getLMStudioRunningModelList().catch(() => ({ models: [] })) log.debug('LMStudio Model list fetched:', response, runningModels) @@ -87,6 +94,9 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => log.error('Failed to fetch LMStudio model list:', error) return [] } + finally { + lmStudioModelListUpdating.value = false + } } const unloadLMStudioModel = async (identifier: string) => { @@ -146,6 +156,10 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => ] }) + const modelListUpdating = computed(() => { + return ollamaModelListUpdating.value || lmStudioModelListUpdating.value + }) + // this function has side effects: it may change the common model in user config const checkCurrentBackendStatus = async () => { const userConfig = await getUserConfig() @@ -193,6 +207,7 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => ollamaConnectionStatusLoading, ollamaConnectionStatus, ollamaModelList, + ollamaModelListUpdating, unloadOllamaModel, updateOllamaModelList, clearOllamaModelList, @@ -201,6 +216,7 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => lmStudioConnectionStatusLoading, lmStudioConnectionStatus, lmStudioModelList, + lmStudioModelListUpdating, unloadLMStudioModel, updateLMStudioModelList, deleteOllamaModel, @@ -212,5 +228,6 @@ export const useLLMBackendStatusStore = defineStore('llm-backend-status', () => checkCurrentBackendStatus, updateModelList, modelList, + modelListUpdating, } }) diff --git a/utils/user-config/defaults.ts b/utils/user-config/defaults.ts index 7d7adb05..ffe7d6be 100644 --- a/utils/user-config/defaults.ts +++ b/utils/user-config/defaults.ts 
@@ -1,7 +1,7 @@ -import { chatDefaultPromptBasedTools } from '../llm/tools/prompt-based/tools' +import { PromptBasedToolType } from '../llm/tools/prompt-based/tools' import { PromptBasedToolBuilder, renderPrompt } from '../prompts/helpers' -export const DEFAULT_CHAT_SYSTEM_PROMPT_WITH_TOOLS = `You are an intelligent AI assistant integrated into a browser extension called NativeMind. Your primary role is to help users understand web content, answer questions, and provide comprehensive assistance based on available resources. +export const DEFAULT_CHAT_SYSTEM_PROMPT_WITH_TOOLS = (tools: PromptBasedToolType[]) => `You are an intelligent AI assistant integrated into a browser extension called NativeMind. Your primary role is to help users understand web content, answer questions, and provide comprehensive assistance based on available resources. # LANGUAGE POLICY 1. Detect the primary human language of @@ -55,51 +55,7 @@ Required tool usage: # AVAILABLE TOOLS: -## view_tab -Purpose: View complete content of a specific tab -Format: - - - - - - -## view_pdf -Purpose: View content of a specific PDF -Format: - - - - - - -## view_image -Purpose: Analyze a specific image -Format: - - - - - - -## search_online -Purpose: Search for current and latest information -Format: - - - - - - - -## fetch_page -Purpose: Get detailed content from specific web pages -Format: - - - - - +${tools.map((tool) => renderPrompt`${new PromptBasedToolBuilder(tool)}`).join('\n\n')} # WORKFLOW: @@ -160,7 +116,9 @@ Your responses should be: - Clear about which source information comes from by using proper citations ` -export const DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE = renderPrompt`You are an intelligent AI assistant integrated into a browser extension called NativeMind. Your primary role is to help users understand web content, answer questions, and provide comprehensive assistance based on available resources. 
+export const DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE = (tools: PromptBasedToolType[]) => { + const hasOnlineSearch = tools.some((t) => t.toolName === 'search_online') + return renderPrompt`You are an intelligent AI assistant integrated into a browser extension called NativeMind. Your primary role is to help users understand web content, answer questions, and provide comprehensive assistance based on available resources. # LANGUAGE POLICY @@ -188,7 +146,7 @@ MANDATORY PRIORITY ORDER - Follow this sequence for ALL queries: 1. SELECTED TAB FIRST: For ANY user query, evaluate if the currently selected tab (marked as SELECTED) might be relevant. If unsure about relevance, always view the selected tab first. 2. Other Available Resources: Check other tabs, PDFs, or images that might contain relevant content 3. Click on Links Within Resources: Use click to explore links within viewed content when relevant elements are identified -4. External Search LAST: Only use search_online when existing resources are insufficient or clearly unrelated +${hasOnlineSearch ? '4. External Search LAST: Only use search_online when existing resources are insufficient or clearly unrelated' : '4. NO EXTERNAL SEARCH: Web search is currently disabled. Focus on available local resources only.'} ## Specific Tool Selection Rules: @@ -205,16 +163,24 @@ Tool Distinctions: - view_pdf: For available PDF content analysis - view_image: For available image analysis - fetch_page: For getting content from new URLs -- search_online: ONLY when existing resources are insufficient +${hasOnlineSearch ? '- search_online: ONLY when existing resources are insufficient' : ''} Required Decision Flow: 1. Assess SELECTED tab relevance to query (if uncertain → view it) 2. Check other available resources (PDFs, images, other tabs) 3. Use click for deeper exploration of relevant content -4. Search online only if gaps remain after resource exploration +${ + hasOnlineSearch + ? '4. 
Search online only if gaps remain after resource exploration' + : '4. If information is insufficient, inform user that web search is disabled' +} Emergency Override: -- Current events/breaking news: search_online may be used first +${ + hasOnlineSearch + ? '- Current events/breaking news: search_online may be used first' + : '- Current events/breaking news: Inform user that web search is disabled' +} - Specific URLs mentioned: fetch_page for those URLs - User explicitly requests online search: follow user preference @@ -224,11 +190,11 @@ Emergency Override: - Click Priority: When you see relevant interactive elements, use click immediately rather than waiting for next round - Based on tool results, determine if additional navigation is needed for complete analysis - Stop Condition: Only stop tool usage when you have sufficient information to provide the requested analysis or summary -- For content exploration tasks, prioritize thoroughness over single-tool limitation +${hasOnlineSearch ? '- User explicitly requests online search: follow user preference' : '- User explicitly requests online search: Politely inform that web search is currently disabled'} # AVAILABLE TOOLS -${chatDefaultPromptBasedTools.map((tool) => renderPrompt`${new PromptBasedToolBuilder(tool)}`).join('\n\n')} +${tools.map((tool) => renderPrompt`${new PromptBasedToolBuilder(tool)}`).join('\n\n')} # WORKFLOW @@ -245,7 +211,7 @@ Simple two-step process for ALL queries: ### Step 3: Other Tools as Needed - Use other available resources: view_pdf, view_image, other tabs -- Use search_online only if existing resources don't provide sufficient information +${hasOnlineSearch ? '- Use search_online only if existing resources don\'t provide sufficient information' : '- Note: Web search is currently disabled. 
Focus on available local resources.'} - Use fetch_page for specific URLs mentioned by user Answer Language: Strictly follow the LANGUAGE POLICY above @@ -265,6 +231,7 @@ Answer Language: Strictly follow the LANGUAGE POLICY above - Never expose internal identifiers in user-visible text: page IDs, file IDs, element/link IDs - Instead say natural phrases like "Let me check that link" or "I'll look at that page" - Act as a seamless assistant, not a technical system demonstrating its capabilities` +} export const DEFAULT_WRITING_TOOLS_REWRITE_SYSTEM_PROMPT = `You are a text rewriting tool. You do NOT answer questions, explain concepts, or provide information. You ONLY rewrite text. diff --git a/utils/user-config/helpers.test.ts b/utils/user-config/helpers.test.ts index 6628186b..a5a126d2 100644 --- a/utils/user-config/helpers.test.ts +++ b/utils/user-config/helpers.test.ts @@ -1,6 +1,7 @@ import { afterEach } from 'node:test' import { beforeEach, describe, expect, it } from 'vitest' +import { computed, ref } from 'vue' import { storage } from 'wxt/utils/storage' import { resetFakeBrowser, resetFakeEntrypoint } from '@/tests/utils/fake-browser' @@ -142,4 +143,17 @@ describe('user config', () => { config.resetDefault() expect(await storage.getItem(config.areaKey)).toBe(null) }) + + it('reactive default value', async () => { + resetFakeBrowser() + + const enabled = ref(false) + const reactiveDefault = computed(() => (enabled.value ? 
'on' : 'off')) + const config = await new Config('testKey').default(reactiveDefault).build() + const reactiveConfig = config.toRef() + + expect(reactiveConfig.value).toBe('off') + enabled.value = true + expect(reactiveConfig.value).toBe('on') + }) }) diff --git a/utils/user-config/helpers.ts b/utils/user-config/helpers.ts index 31a249dc..914d6175 100644 --- a/utils/user-config/helpers.ts +++ b/utils/user-config/helpers.ts @@ -1,4 +1,4 @@ -import { customRef, Ref, ref, toRaw, watch } from 'vue' +import { customRef, isRef, MaybeRef, Ref, ref, toRaw, unref, UnwrapRef, watch } from 'vue' import { storage, StorageItemKey } from 'wxt/utils/storage' const getItem = async (key: StorageItemKey) => { @@ -18,11 +18,9 @@ export class ValidateError extends Error { export type ExtendedRef = Ref & { defaultValue: D } -export class Config { - defaultValue?: DefaultValue +export class Config> { + defaultValue?: Ref> isSession = false - transformer?: (value: Value | DefaultValue) => Value | DefaultValue - validator?: (value: Value | DefaultValue) => { isValid: boolean, displayMessage?: string } migrations: { fromKey: string migrate: (value: Value | DefaultValue | undefined) => Value | DefaultValue | undefined @@ -34,9 +32,9 @@ export class Config { return `local:${this.key}` as const } - default(defaultValue: D) { - this.defaultValue = defaultValue - return this as unknown as Config + default(defaultValue: MaybeRef) { + this.defaultValue = (isRef(defaultValue) ? 
defaultValue : ref(defaultValue)) as Ref> + return this as unknown as Config, UnwrapRef> } migrateFrom(fromKey: string, migration: (value: Value | DefaultValue | undefined) => Value | DefaultValue | undefined) { @@ -44,11 +42,6 @@ export class Config { return this } - validate(validator: (value: Value | DefaultValue) => { isValid: boolean, displayMessage?: string }) { - this.validator = validator - return this - } - private removeItem() { return storage.removeItem(this.areaKey) } @@ -73,47 +66,43 @@ export class Config { return lastValue } - async build() { + getClonedDefaultValue() { const defaultValue = this.defaultValue - const clonedDefaultValue = structuredClone(defaultValue) + return structuredClone(toRaw(unref(defaultValue))) as DefaultValue + } + + async build() { const localValue = (await this.getItem()) ?? undefined const migratedValue = await this.execMigration() ?? localValue if (migratedValue) await this.setItem(migratedValue) - const v = migratedValue ?? clonedDefaultValue - const refValue = ref(v) + const v = migratedValue ?? localValue + const boundStorageValue = ref(v) let ignoreSetLocalStorage = false - watch(refValue, async (newValue) => { + watch(boundStorageValue, async (newValue) => { if (ignoreSetLocalStorage) return this.setItem(toRaw(newValue)) }, { deep: true, flush: 'sync' }) - const r = customRef((track, trigger) => { + const r = customRef>((track, trigger) => { return { - get() { + get: () => { track() - return refValue.value + const clonedDefaultValue = this.getClonedDefaultValue() + const r = boundStorageValue.value ?? 
clonedDefaultValue + return r }, set: (value) => { - if (this.transformer) { - value = this.transformer(value) - } - if (this.validator) { - const { isValid, displayMessage } = this.validator(value) - if (!isValid) { - throw new ValidateError(displayMessage || 'Invalid value') - } - } - refValue.value = value + boundStorageValue.value = value trigger() }, } }) as ExtendedRef - r.defaultValue = structuredClone(defaultValue) as DefaultValue + r.defaultValue = this.getClonedDefaultValue() storage.watch(this.areaKey, async (newValue, oldValue) => { newValue = newValue ?? undefined if (newValue !== oldValue) { ignoreSetLocalStorage = true - refValue.value = (newValue === undefined || newValue === null) ? structuredClone(defaultValue) : newValue + boundStorageValue.value = newValue ignoreSetLocalStorage = false } }) @@ -125,11 +114,11 @@ export class Config { return { get key() { return key }, get areaKey() { return areaKey }, - getDefault() { - return structuredClone(defaultValue) as DefaultValue + getDefault: () => { + return this.getClonedDefaultValue() }, - resetDefault() { - r.value = structuredClone(defaultValue) as DefaultValue + resetDefault: () => { + r.value = this.getClonedDefaultValue() removeItem() }, toRef: () => r, diff --git a/utils/user-config/index.ts b/utils/user-config/index.ts index 09369d5e..75c9df81 100644 --- a/utils/user-config/index.ts +++ b/utils/user-config/index.ts @@ -1,3 +1,4 @@ +import { computed } from 'vue' import { browser } from 'wxt/browser' import { c2bRpc } from '@/utils/rpc' @@ -6,11 +7,12 @@ import { SupportedLocaleCode } from '../i18n/constants' import { generateRandomId } from '../id' import { LanguageCode } from '../language/detect' import { LLMEndpointType } from '../llm/models' +import { promptBasedToolCollections } from '../llm/tools/prompt-based/tools' import logger from '../logger' import { lazyInitialize } from '../memo' import { forRuntimes } from '../runtime' import { ByteSize } from '../sizes' -import { 
DEFAULT_CHAT_SYSTEM_PROMPT, DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE, DEFAULT_CHAT_SYSTEM_PROMPT_WITH_TOOLS, DEFAULT_CHAT_TITLE_GENERATION_SYSTEM_PROMPT, DEFAULT_TRANSLATOR_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_LIST_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_PROOFREAD_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_REWRITE_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_SPARKLE_SYSTEM_PROMPT } from './defaults' +import { DEFAULT_CHAT_SYSTEM_PROMPT, DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE, DEFAULT_CHAT_TITLE_GENERATION_SYSTEM_PROMPT, DEFAULT_TRANSLATOR_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_LIST_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_PROOFREAD_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_REWRITE_SYSTEM_PROMPT, DEFAULT_WRITING_TOOLS_SPARKLE_SYSTEM_PROMPT } from './defaults' import { Config } from './helpers' const log = logger.child('user-config') @@ -65,6 +67,19 @@ export async function _getUserConfig() { } const enableBrowserUse = await new Config('browserUse.enable').default(false).build() + const enableOnlineSearch = await new Config('chat.onlineSearch.enable').default(true).build() + const defaultChatSystemPrompt = computed(() => { + const enableBrowserUseStatus = enableBrowserUse.get() + const enableOnlineSearchStatus = enableOnlineSearch.get() + if (enableBrowserUseStatus) { + if (enableOnlineSearchStatus) return DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE(promptBasedToolCollections.browserUse.onlineSearch) + else return DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE(promptBasedToolCollections.browserUse.nonOnlineSearch) + } + else { + if (enableOnlineSearchStatus) return DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE(promptBasedToolCollections.nonBrowserUse.onlineSearch) + else return DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE(promptBasedToolCollections.nonBrowserUse.nonOnlineSearch) + } + }) return { locale: { @@ -117,11 +132,12 @@ export async function _getUserConfig() { }, systemPrompt: await new Config('chat.systemPrompt_1') .migrateFrom('chat.systemPrompt', (v) => v === 
DEFAULT_CHAT_SYSTEM_PROMPT ? undefined : v) - .default(enableBrowserUse.get() ? DEFAULT_CHAT_SYSTEM_PROMPT_WITH_BROWSER_USE : DEFAULT_CHAT_SYSTEM_PROMPT_WITH_TOOLS).build(), + .default(defaultChatSystemPrompt).build(), history: { currentChatId: await new Config('chat.history.currentChatId').default(generateRandomId()).build(), }, onlineSearch: { + enable: enableOnlineSearch, pageReadCount: await new Config('chat.onlineSearch.pageReadCount').default(5).build(), // how many pages to read when online search is enabled }, quickActions: { From 0581a4f37d7710b0cc8d3903eb4e39a8d68b1005 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 12 Sep 2025 09:16:09 +0000 Subject: [PATCH 08/59] chore(release): v1.10.0-beta.3 --- CHANGELOG.md | 13 +++++++++++++ package.json | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 91745844..aa1d1e2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,19 @@ # Changelog +## v1.10.0-beta.3 + +[compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.10.0-beta.2...v1.10.0-beta.3) + +### 🚀 Enhancements + +- **tests:** Add onboarding and settings tests, enhance mock responses ([e8793b7](https://github.com/NativeMindBrowser/NativeMindExtension/commit/e8793b7)) +- **chat:** Add toggle to enable online search and its settings ([a60f489](https://github.com/NativeMindBrowser/NativeMindExtension/commit/a60f489)) + +### ❤️ Contributors + +- Tony Hu ([@tonyhu-012](http://github.com/tonyhu-012)) + ## v1.10.0-beta.2 [compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.10.0-beta.1...v1.10.0-beta.2) diff --git a/package.json b/package.json index e3451885..78bad44d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "nativemind-extension", - "version": "1.10.0-beta.2", + "version": "1.10.0-beta.3", "private": false, "author": "NativeMind", "keywords": [ From 374a849c869f4ba573d34d246a84b2f59984215c Mon Sep 17 
00:00:00 2001 From: Tony Hu Date: Mon, 15 Sep 2025 09:28:18 +0800 Subject: [PATCH 09/59] chore(browser-use): set default for browser use to true --- utils/user-config/index.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/user-config/index.ts b/utils/user-config/index.ts index 75c9df81..9c4c4f72 100644 --- a/utils/user-config/index.ts +++ b/utils/user-config/index.ts @@ -66,7 +66,7 @@ export async function _getUserConfig() { } } - const enableBrowserUse = await new Config('browserUse.enable').default(false).build() + const enableBrowserUse = await new Config('browserUse.enable').default(true).build() const enableOnlineSearch = await new Config('chat.onlineSearch.enable').default(true).build() const defaultChatSystemPrompt = computed(() => { const enableBrowserUseStatus = enableBrowserUse.get() From 4847c2cd0f388dcc1c209b31180d64f4f96f983a Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Tue, 16 Sep 2025 16:29:31 +0800 Subject: [PATCH 10/59] chore(chat): improve assistant message expanded class --- .../sidepanel/components/Chat/Messages/Assistant.vue | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/entrypoints/sidepanel/components/Chat/Messages/Assistant.vue b/entrypoints/sidepanel/components/Chat/Messages/Assistant.vue index d2a31a1c..17de5212 100644 --- a/entrypoints/sidepanel/components/Chat/Messages/Assistant.vue +++ b/entrypoints/sidepanel/components/Chat/Messages/Assistant.vue @@ -76,8 +76,7 @@ { +test('show download model tutorial if ollama/lm studio is running', async ({ page, extensionId, context, extension }) => { context.route('http://localhost:11434/', async (route) => { await route.fulfill({ body: 'Ollama is running', contentType: 'plain/text' }) }) - context.route(/http:\/\/localhost:1234/, async (route) => { - await route.abort('connectionfailed') - }) context.route(/\/api\/ps/, async (route) => { await route.fulfill({ body: JSON.stringify(ollamaPsEmptyResponse), contentType: 'application/json' }) }) @@ 
-20,8 +17,7 @@ test('show download model tutorial if ollama/lm studio is running', async ({ pag await route.fulfill({ body: JSON.stringify(ollamaShowResponse), contentType: 'application/json' }) }) await page.goto('chrome-extension://' + extensionId + '/sidepanel.html') - // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types - await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' })) + await extension.setStorageItem('locale.current', 'en') await expect(page.getByText('Download a model to begin').first()).toBeVisible({ timeout: 15000 }) }) @@ -31,7 +27,6 @@ test('show startup tutorial if ollama is not running', async ({ page, extensionI }) await extension.setStorageItem('llm.backends.lmStudio.baseUrl', 'ws://localhost:12345') // set to a non-existing ws url to avoid lm studio connection await page.goto('chrome-extension://' + extensionId + '/sidepanel.html') - // @ts-expect-error - chrome.storage is a Chrome extension API but not defined in types - await page.evaluate(() => chrome.storage.local.set({ 'locale.current': 'en' })) + await extension.setStorageItem('locale.current', 'en') await expect(page.getByText('How do you want to run AI locally').first()).toBeVisible({ timeout: 15000 }) }) From a6ee1f9b15abf744383d8a5dc088c40520286007 Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Wed, 17 Sep 2025 16:55:48 +0800 Subject: [PATCH 12/59] chore(logging): enhance rawLoggingMiddleware to log stream and generate results --- utils/llm/middlewares.ts | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/utils/llm/middlewares.ts b/utils/llm/middlewares.ts index ba7c24fe..052cb0b3 100644 --- a/utils/llm/middlewares.ts +++ b/utils/llm/middlewares.ts @@ -5,7 +5,6 @@ import { extractReasoningMiddleware } from 'ai' import { z } from 'zod' import { nonNullable } from '../array' -import { debounce } from '../debounce' import { ParseFunctionCallError } from '../error' import { 
generateRandomId } from '../id' import Logger from '../logger' @@ -190,30 +189,31 @@ export const normalizeToolCallsMiddleware: LanguageModelV1Middleware = { export const rawLoggingMiddleware: LanguageModelV1Middleware = { wrapStream: async ({ doStream, params }) => { const log = logger.child('rawLoggingMiddleware') + const text: string[] = [] + const reasoning: string[] = [] const { stream, ...rest } = await doStream() - log.debug('Stream started', { params }) - let text = '' - let reasoning = '' - const printLog = debounce(() => { - log.debug('Stream progress', { text, reasoning }) - }, 2000) - const transformStream = new TransformStream< LanguageModelV1StreamPart, LanguageModelV1StreamPart >({ transform(chunk, controller) { if (chunk.type === 'text-delta') { - text += chunk.textDelta + text.push(chunk.textDelta) } else if (chunk.type === 'reasoning') { - reasoning += chunk.textDelta + reasoning.push(chunk.textDelta) } - printLog() controller.enqueue(chunk) }, + flush() { + log.info('LLM Stream Result', { + params, + text: text.join(''), + reasoning: reasoning.join(''), + }) + }, }) return { @@ -221,6 +221,18 @@ export const rawLoggingMiddleware: LanguageModelV1Middleware = { ...rest, } }, + wrapGenerate: async ({ doGenerate, params }) => { + const log = logger.child('rawLoggingMiddleware') + + const result = await doGenerate() + + log.info('LLM Generate Result', { + params, + result, + }) + + return result + }, } const errorResponse = /<\|channel\|>(?!\s*commentary\s+to=[a-z_.]+\s*>)[^<]+>(<\/assistant)?/gs @@ -437,9 +449,9 @@ export const lmStudioHarmonyEncodingMiddleware: LanguageModelV1Middleware = { } export const middlewares = [ - // rawLoggingMiddleware, normalizeToolCallsMiddleware, extractPromptBasedToolCallsMiddleware, lmStudioHarmonyEncodingMiddleware, reasoningMiddleware, + rawLoggingMiddleware, ] From 6e23ca8a29e1f1305cb5ed564fe87b7cb32041f6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 17 Sep 2025 09:02:33 +0000 Subject: [PATCH 
13/59] chore(release): v1.10.0-beta.4 --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ package.json | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5904a3b4..9a117da3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,36 @@ # Changelog +## v1.10.0-beta.4 + +[compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.9.4...v1.10.0-beta.4) + +### 🚀 Enhancements + +- **lm-studio:** Initial support for LM Studio ([0f318d9](https://github.com/NativeMindBrowser/NativeMindExtension/commit/0f318d9)) +- **tests:** Add onboarding and settings tests, enhance mock responses ([e8793b7](https://github.com/NativeMindBrowser/NativeMindExtension/commit/e8793b7)) +- **chat:** Add toggle to enable online search and its settings ([a60f489](https://github.com/NativeMindBrowser/NativeMindExtension/commit/a60f489)) +- **tests:** Add LLM API tests and mock responses for Ollama ([16a1f8d](https://github.com/NativeMindBrowser/NativeMindExtension/commit/16a1f8d)) + +### 🩹 Fixes + +- **lm-studio:** Return consistent structured response format ([24d9636](https://github.com/NativeMindBrowser/NativeMindExtension/commit/24d9636)) +- **pattern-extractor:** Ensure extracted values are strings and handle undefined keys ([b95e17f](https://github.com/NativeMindBrowser/NativeMindExtension/commit/b95e17f)) + +### 🏡 Chore + +- **package:** Update version to 1.10.0 ([d933b93](https://github.com/NativeMindBrowser/NativeMindExtension/commit/d933b93)) +- **release:** V1.10.0-beta.1 ([67c8c4f](https://github.com/NativeMindBrowser/NativeMindExtension/commit/67c8c4f)) +- **release:** V1.10.0-beta.2 ([170af85](https://github.com/NativeMindBrowser/NativeMindExtension/commit/170af85)) +- **release:** V1.10.0-beta.3 ([0581a4f](https://github.com/NativeMindBrowser/NativeMindExtension/commit/0581a4f)) +- **browser-use:** Set default for browser use to true 
([374a849](https://github.com/NativeMindBrowser/NativeMindExtension/commit/374a849)) +- **chat:** Improve assistant message expanded class ([4847c2c](https://github.com/NativeMindBrowser/NativeMindExtension/commit/4847c2c)) +- **logging:** Enhance rawLoggingMiddleware to log stream and generate results ([a6ee1f9](https://github.com/NativeMindBrowser/NativeMindExtension/commit/a6ee1f9)) + +### ❤️ Contributors + +- Tony Hu ([@tonyhu-012](http://github.com/tonyhu-012)) + ## v1.10.0-beta.3 [compare changes](https://github.com/NativeMindBrowser/NativeMindExtension/compare/v1.10.0-beta.2...v1.10.0-beta.3) diff --git a/package.json b/package.json index 78bad44d..da702d07 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "nativemind-extension", - "version": "1.10.0-beta.3", + "version": "1.10.0-beta.4", "private": false, "author": "NativeMind", "keywords": [ From 3b6a8e05264371e2a292601fba0c7bee9957df4a Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Thu, 18 Sep 2025 09:09:09 +0800 Subject: [PATCH 14/59] fix(web-llm): fix infinite loading when local model cache is invalid --- utils/rpc/background-fns.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/rpc/background-fns.ts b/utils/rpc/background-fns.ts index 966ea1b2..8b88ed51 100644 --- a/utils/rpc/background-fns.ts +++ b/utils/rpc/background-fns.ts @@ -599,6 +599,7 @@ function initWebLLMEngine(model: WebLLMSupportedModel) { }, }) port.postMessage({ type: 'ready' }) + port.disconnect() }) return { portName } } From 130bdd72c2cacb855f3e5a2999bb823fdc4d965b Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Mon, 22 Sep 2025 16:57:19 +0800 Subject: [PATCH 15/59] fix(settings): update connection message for LM Studio in multiple language files --- locales/de.json | 2 +- locales/en.json | 2 +- locales/es.json | 2 +- locales/fr.json | 2 +- locales/id.json | 2 +- locales/ja.json | 2 +- locales/pt.json | 2 +- locales/ru.json | 2 +- locales/th.json | 2 +- locales/vi.json | 2 +- locales/zh-CN.json | 2 +- 
locales/zh-TW.json | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/locales/de.json b/locales/de.json index 3a9095cb..6691ac24 100644 --- a/locales/de.json +++ b/locales/de.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama verbunden, aber keine aktiven Modelle", "not_connected_to_ollama": "Nicht mit Ollama verbunden", "lm_studio_no_active_models": "LM Studio verbunden, aber keine aktiven Modelle", - "not_connected_to_lm_studio": "Nicht mit Ollama verbunden" + "not_connected_to_lm_studio": "Nicht mit LM Studio verbunden" }, "delete_model_confirm": "Sind Sie sicher, dass Sie das Modell \"{model}\" löschen möchten?" }, diff --git a/locales/en.json b/locales/en.json index bae40e7d..462ffe86 100644 --- a/locales/en.json +++ b/locales/en.json @@ -111,7 +111,7 @@ "not_connected_to_ollama": "Not connected to Ollama", "no_active_models": "Ollama connected, but no active models", "lm_studio_no_active_models": "LM Studio connected, but no active models", - "not_connected_to_lm_studio": "Not connected to Ollama" + "not_connected_to_lm_studio": "Not connected to LM Studio" }, "delete_model_confirm": "Are you sure you want to delete the model \"{model}\"?" }, diff --git a/locales/es.json b/locales/es.json index b50f0946..cd952281 100644 --- a/locales/es.json +++ b/locales/es.json @@ -203,7 +203,7 @@ "no_active_models": "Ollama conectado, pero no hay modelos activos", "not_connected_to_ollama": "No conectado a Ollama", "lm_studio_no_active_models": "LM Studio conectado, pero no hay modelos activos", - "not_connected_to_lm_studio": "No conectado a Ollama" + "not_connected_to_lm_studio": "No conectado a LM Studio" }, "delete_model_confirm": "¿Estás seguro de que quieres eliminar el modelo \"{model}\"?" 
}, diff --git a/locales/fr.json b/locales/fr.json index c80379dd..51374aeb 100644 --- a/locales/fr.json +++ b/locales/fr.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama connecté, mais aucun modèle actif", "not_connected_to_ollama": "Pas connecté à Ollama", "lm_studio_no_active_models": "LM Studio connecté, mais aucun modèle actif", - "not_connected_to_lm_studio": "Pas connecté à Ollama" + "not_connected_to_lm_studio": "Pas connecté à LM Studio" }, "delete_model_confirm": "Êtes-vous sûr de vouloir supprimer le modèle \"{model}\" ?" }, diff --git a/locales/id.json b/locales/id.json index 03ea0433..cfff24ac 100644 --- a/locales/id.json +++ b/locales/id.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama terhubung, tetapi tidak ada model yang aktif", "not_connected_to_ollama": "Tidak terhubung ke Ollama", "lm_studio_no_active_models": "LM Studio terhubung, tetapi tidak ada model yang aktif", - "not_connected_to_lm_studio": "Tidak terhubung ke Ollama" + "not_connected_to_lm_studio": "Tidak terhubung ke LM Studio" }, "delete_model_confirm": "Apakah Anda yakin ingin menghapus model \"{model}\"?" }, diff --git a/locales/ja.json b/locales/ja.json index 14ebcafa..ce49669b 100644 --- a/locales/ja.json +++ b/locales/ja.json @@ -244,7 +244,7 @@ "no_active_models": "Ollamaに接続されていますが、アクティブなモデルがありません", "not_connected_to_ollama": "Ollamaに接続されていません", "lm_studio_no_active_models": "LM Studio に接続されましたが、アクティブなモデルがありません", - "not_connected_to_lm_studio": "Ollamaに接続されていません" + "not_connected_to_lm_studio": "LM Studioに接続されていません" }, "delete_model_confirm": "モデル「{model}」を削除してもよろしいですか?" 
}, diff --git a/locales/pt.json b/locales/pt.json index f62fb6d8..d2efb8e1 100644 --- a/locales/pt.json +++ b/locales/pt.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama conectado, mas nenhum modelo ativo", "not_connected_to_ollama": "Não conectado ao Ollama", "lm_studio_no_active_models": "LM Studio conectado, mas nenhum modelo ativo", - "not_connected_to_lm_studio": "Não conectado ao Ollama" + "not_connected_to_lm_studio": "Não conectado ao LM Studio" }, "delete_model_confirm": "Tem certeza de que deseja excluir o modelo \"{model}\"?" }, diff --git a/locales/ru.json b/locales/ru.json index 6a8617c0..bbb84b84 100644 --- a/locales/ru.json +++ b/locales/ru.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama подключен, но нет активных моделей", "not_connected_to_ollama": "Не подключено к Ollama", "lm_studio_no_active_models": "LM Studio подключена, но нет активных моделей", - "not_connected_to_lm_studio": "Не подключен к Ollama" + "not_connected_to_lm_studio": "Не подключено к LM Studio" }, "delete_model_confirm": "Вы уверены, что хотите удалить модель \"{model}\"?" }, diff --git a/locales/th.json b/locales/th.json index 49745006..7098bd6b 100644 --- a/locales/th.json +++ b/locales/th.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama เชื่อมต่อแล้ว แต่ไม่มีโมเดลที่ใช้งานอยู่", "not_connected_to_ollama": "ไม่ได้เชื่อมต่อกับ Ollama", "lm_studio_no_active_models": "LM Studio เชื่อมต่อแล้ว แต่ไม่มีโมเดลที่ใช้งานอยู่", - "not_connected_to_lm_studio": "ไม่ได้เชื่อมต่อกับ Ollama" + "not_connected_to_lm_studio": "ไม่ได้เชื่อมต่อกับ LM Studio" }, "delete_model_confirm": "คุณแน่ใจหรือไม่ว่าต้องการลบโมเดล \"{model}\"?" 
}, diff --git a/locales/vi.json b/locales/vi.json index c47bc31a..ce2a85bc 100644 --- a/locales/vi.json +++ b/locales/vi.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama đã kết nối, nhưng không có mô hình nào đang hoạt động", "not_connected_to_ollama": "Không kết nối với Ollama", "lm_studio_no_active_models": "LM Studio đã kết nối, nhưng không có mô hình nào đang hoạt động", - "not_connected_to_lm_studio": "Không kết nối với Ollama" + "not_connected_to_lm_studio": "Không kết nối với LM Studio" }, "delete_model_confirm": "Bạn có chắc chắn muốn xóa mô hình \"{model}\" không?" }, diff --git a/locales/zh-CN.json b/locales/zh-CN.json index 2373a7be..f6d9f474 100644 --- a/locales/zh-CN.json +++ b/locales/zh-CN.json @@ -101,7 +101,7 @@ "no_active_models": "Ollama 已连接,但没有活跃的模型", "not_connected_to_ollama": "未连接到 Ollama", "lm_studio_no_active_models": "LM Studio 已连接,但没有活跃的模型", - "not_connected_to_lm_studio": "未连接到 Ollama" + "not_connected_to_lm_studio": "未连接到 LM Studio" }, "delete_model_confirm": "您确定要删除模型\"{model}\"吗?" }, diff --git a/locales/zh-TW.json b/locales/zh-TW.json index 4638f211..4baeb728 100644 --- a/locales/zh-TW.json +++ b/locales/zh-TW.json @@ -244,7 +244,7 @@ "no_active_models": "Ollama 已連接,但沒有啟用的模型", "not_connected_to_ollama": "未連接到 Ollama", "lm_studio_no_active_models": "LM Studio 已連接,但沒有啟用的模型", - "not_connected_to_lm_studio": "未連接到 Ollama" + "not_connected_to_lm_studio": "未連接到 LM Studio" }, "delete_model_confirm": "您確定要刪除模型「{model}」嗎?" 
}, From 72515e884caae83b878038256acacf7049edc27d Mon Sep 17 00:00:00 2001 From: Tony Hu Date: Mon, 22 Sep 2025 16:57:55 +0800 Subject: [PATCH 16/59] chore(onboarding): adjust help section visibility based on selected endpoint type --- .../Blocks/LMStudioConfiguration.vue | 51 ++++++++----------- .../BackendSelectionTutorialCard.vue | 5 +- 2 files changed, 24 insertions(+), 32 deletions(-) diff --git a/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue b/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue index 86d4cf79..731c31f4 100644 --- a/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue +++ b/entrypoints/settings/components/GeneralSettings/Blocks/LMStudioConfiguration.vue @@ -1,6 +1,6 @@