From cbe785eb3f8fb6178dad81cc6fe27c5b1c5514c9 Mon Sep 17 00:00:00 2001
From: canisminor1990
Date: Sun, 24 Mar 2024 22:11:19 +0800
Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20feat:=20Add=20new=20model=20info?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../src/types => common}/models.ts            |  6 +++++
 .../lobe-commit/src/commands/Config/index.tsx |  3 ++-
 packages/lobe-commit/src/core/Commits.ts      |  3 ++-
 packages/lobe-commit/src/store/config.ts      |  7 +++---
 packages/lobe-commit/src/store/selectors.ts   |  2 +-
 packages/lobe-commit/src/types/config.ts      |  2 +-
 packages/lobe-i18n/src/store/initialState.ts  |  5 ++--
 packages/lobe-i18n/src/types/config.ts        |  2 +-
 packages/lobe-i18n/src/types/models.ts        | 25 -------------------
 .../lobe-i18n/src/utils/splitJsonToChunks.ts  |  4 +--
 packages/lobe-seo/src/store/initialState.ts   |  5 ++--
 packages/lobe-seo/src/types/config.ts         |  2 +-
 packages/lobe-seo/src/types/models.ts         | 25 -------------------
 13 files changed, 26 insertions(+), 65 deletions(-)
 rename packages/{lobe-commit/src/types => common}/models.ts (74%)
 delete mode 100644 packages/lobe-i18n/src/types/models.ts
 delete mode 100644 packages/lobe-seo/src/types/models.ts

diff --git a/packages/lobe-commit/src/types/models.ts b/packages/common/models.ts
similarity index 74%
rename from packages/lobe-commit/src/types/models.ts
rename to packages/common/models.ts
index a6e6a12..7a0305a 100644
--- a/packages/lobe-commit/src/types/models.ts
+++ b/packages/common/models.ts
@@ -3,12 +3,14 @@ export enum LanguageModel {
    * GPT 3.5 Turbo
    */
   GPT3_5 = 'gpt-3.5-turbo',
+  GPT3_5_0125 = 'gpt-3.5-turbo-0125',
   GPT3_5_1106 = 'gpt-3.5-turbo-1106',
   GPT3_5_16K = 'gpt-3.5-turbo-16k',
   /**
    * GPT 4
    */
   GPT4 = 'gpt-4',
+  GPT4_0125_PREVIEW = 'gpt-4-0125-preview',
   GPT4_32K = 'gpt-4-32k',
   GPT4_PREVIEW = 'gpt-4-1106-preview',
   GPT4_VISION_PREVIEW = 'gpt-4-vision-preview',
@@ -18,8 +20,12 @@ export const ModelTokens: Record<LanguageModel, number> = {
   [LanguageModel.GPT3_5]: 4096,
   [LanguageModel.GPT3_5_1106]: 16_385,
   [LanguageModel.GPT3_5_16K]: 16_385,
+  [LanguageModel.GPT3_5_0125]: 16_385,
   [LanguageModel.GPT4]: 8196,
   [LanguageModel.GPT4_PREVIEW]: 128_000,
   [LanguageModel.GPT4_VISION_PREVIEW]: 128_000,
+  [LanguageModel.GPT4_0125_PREVIEW]: 128_000,
   [LanguageModel.GPT4_32K]: 32_768,
 };
+
+export const defaultModel = LanguageModel.GPT3_5_0125;
diff --git a/packages/lobe-commit/src/commands/Config/index.tsx b/packages/lobe-commit/src/commands/Config/index.tsx
index 848df04..b247fdf 100644
--- a/packages/lobe-commit/src/commands/Config/index.tsx
+++ b/packages/lobe-commit/src/commands/Config/index.tsx
@@ -5,7 +5,8 @@ import { memo, useMemo, useState } from 'react';
 import { BASE_PROMPT } from '@/constants/template';
 import { useConfStore } from '@/store';
 import type { ConfigKeys, Config as LocalConfig } from '@/types/config';
-import { LanguageModel } from '@/types/models';
+
+import { LanguageModel } from '../../../../common/models';
 
 const Config = memo(() => {
   const [active, setActive] = useState();
diff --git a/packages/lobe-commit/src/core/Commits.ts b/packages/lobe-commit/src/core/Commits.ts
index 5f7b21e..4e6909d 100644
--- a/packages/lobe-commit/src/core/Commits.ts
+++ b/packages/lobe-commit/src/core/Commits.ts
@@ -9,10 +9,11 @@ import { execSync } from 'node:child_process';
 import { SUMMARY_PROMPT, SUMMARY_REFINE_PROMPT, promptCommits } from '@/prompts/commits';
 import { selectors } from '@/store';
 import { Config } from '@/types/config';
-import { ModelTokens } from '@/types/models';
 import { calcToken } from '@/utils/calcToken';
 import { addEmojiToMessage } from '@/utils/genCommitMessage';
 
+import { ModelTokens } from '../../../common/models';
+
 export interface GenAiCommitProps {
   cacheSummary?: string;
   setLoadingInfo: (text: string) => void;
diff --git a/packages/lobe-commit/src/store/config.ts b/packages/lobe-commit/src/store/config.ts
index 0aff3c5..518e58e 100644
--- a/packages/lobe-commit/src/store/config.ts
+++ b/packages/lobe-commit/src/store/config.ts
@@ -1,7 +1,8 @@
 import Conf from 'conf';
 
 import { ConfigSchema } from '@/types/config';
-import { LanguageModel, ModelTokens } from '@/types/models';
+
+import { ModelTokens, defaultModel } from '../../../common/models';
 
 export const schema: ConfigSchema = {
   apiBaseUrl: {
@@ -9,7 +10,7 @@ export const schema: ConfigSchema = {
     type: 'string',
   },
   diffChunkSize: {
-    default: ModelTokens[LanguageModel.GPT3_5] - 512,
+    default: ModelTokens[defaultModel] - 512,
     type: 'number',
   },
   emoji: {
@@ -29,7+30,7 @@ export const schema: ConfigSchema = {
     type: 'number',
   },
   modelName: {
-    default: LanguageModel.GPT3_5,
+    default: defaultModel,
     type: 'string',
   },
   openaiToken: {
diff --git a/packages/lobe-commit/src/store/selectors.ts b/packages/lobe-commit/src/store/selectors.ts
index c1e3c06..0991e68 100644
--- a/packages/lobe-commit/src/store/selectors.ts
+++ b/packages/lobe-commit/src/store/selectors.ts
@@ -1,8 +1,8 @@
 import dotenv from 'dotenv';
 
 import { Config, ConfigKeys } from '@/types/config';
-import { ModelTokens } from '@/types/models';
+import { ModelTokens } from '../../../common/models';
 
 import { config, schema } from './config';
 
 dotenv.config();
diff --git a/packages/lobe-commit/src/types/config.ts b/packages/lobe-commit/src/types/config.ts
index 209920e..06e0a12 100644
--- a/packages/lobe-commit/src/types/config.ts
+++ b/packages/lobe-commit/src/types/config.ts
@@ -1,4 +1,4 @@
-import { LanguageModel } from './models';
+import { LanguageModel } from '../../../common/models';
 
 export interface Config {
   apiBaseUrl: string;
diff --git a/packages/lobe-i18n/src/store/initialState.ts b/packages/lobe-i18n/src/store/initialState.ts
index 43c084f..967115b 100644
--- a/packages/lobe-i18n/src/store/initialState.ts
+++ b/packages/lobe-i18n/src/store/initialState.ts
@@ -1,7 +1,8 @@
 import { I18nConfig, MarkdownModeType } from '@/types/config';
-import { LanguageModel } from '@/types/models';
 import { getDefaultExtension } from '@/utils/getDefaultExtension';
 
+import { defaultModel } from '../../../common/models';
+
 export const DEFAULT_CONFIG: Partial<I18nConfig> = {
   concurrency: 5,
   markdown: {
@@ -9,6 +10,6 @@ export const DEFAULT_CONFIG: Partial<I18nConfig> = {
     mode: MarkdownModeType.STRING,
     outputExtensions: getDefaultExtension,
   },
-  modelName: LanguageModel.GPT3_5,
+  modelName: defaultModel,
   temperature: 0,
 };
diff --git a/packages/lobe-i18n/src/types/config.ts b/packages/lobe-i18n/src/types/config.ts
index b56f7c4..79e09e7 100644
--- a/packages/lobe-i18n/src/types/config.ts
+++ b/packages/lobe-i18n/src/types/config.ts
@@ -1,4 +1,4 @@
-import { LanguageModel } from './models';
+import { LanguageModel } from '../../../common/models';
 
 export interface I18nConfigLocale {
   /**
diff --git a/packages/lobe-i18n/src/types/models.ts b/packages/lobe-i18n/src/types/models.ts
deleted file mode 100644
index a6e6a12..0000000
--- a/packages/lobe-i18n/src/types/models.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-export enum LanguageModel {
-  /**
-   * GPT 3.5 Turbo
-   */
-  GPT3_5 = 'gpt-3.5-turbo',
-  GPT3_5_1106 = 'gpt-3.5-turbo-1106',
-  GPT3_5_16K = 'gpt-3.5-turbo-16k',
-  /**
-   * GPT 4
-   */
-  GPT4 = 'gpt-4',
-  GPT4_32K = 'gpt-4-32k',
-  GPT4_PREVIEW = 'gpt-4-1106-preview',
-  GPT4_VISION_PREVIEW = 'gpt-4-vision-preview',
-}
-
-export const ModelTokens: Record<LanguageModel, number> = {
-  [LanguageModel.GPT3_5]: 4096,
-  [LanguageModel.GPT3_5_1106]: 16_385,
-  [LanguageModel.GPT3_5_16K]: 16_385,
-  [LanguageModel.GPT4]: 8196,
-  [LanguageModel.GPT4_PREVIEW]: 128_000,
-  [LanguageModel.GPT4_VISION_PREVIEW]: 128_000,
-  [LanguageModel.GPT4_32K]: 32_768,
-};
diff --git a/packages/lobe-i18n/src/utils/splitJsonToChunks.ts b/packages/lobe-i18n/src/utils/splitJsonToChunks.ts
index 5db1cf4..ed5c514 100644
--- a/packages/lobe-i18n/src/utils/splitJsonToChunks.ts
+++ b/packages/lobe-i18n/src/utils/splitJsonToChunks.ts
@@ -2,8 +2,8 @@ import { isPlainObject, reduce } from 'lodash-es';
 
 import { LocaleObj } from '@/types';
 import { I18nConfig } from '@/types/config';
-import { LanguageModel, ModelTokens } from '@/types/models';
+import { ModelTokens, defaultModel } from '../../../common/models';
 import {
   KEY_EXTRA_TOKENS,
   OBJECT_EXTRA_TOKENS,
@@ -38,7 +38,7 @@ const splitJSONtoSmallChunks = (object: LocaleObj, splitToken: number) =>
   ).map(([chunk]) => chunk);
 
 export const getSplitToken = (config: I18nConfig, prompt: string) => {
-  let splitToken = (ModelTokens[config.modelName || LanguageModel.GPT3_5] - calcToken(prompt)) / 3;
+  let splitToken = (ModelTokens[config.modelName || defaultModel] - calcToken(prompt)) / 3;
   if (config.splitToken && config.splitToken < splitToken) {
     splitToken = config.splitToken;
   }
diff --git a/packages/lobe-seo/src/store/initialState.ts b/packages/lobe-seo/src/store/initialState.ts
index d22426e..6de390e 100644
--- a/packages/lobe-seo/src/store/initialState.ts
+++ b/packages/lobe-seo/src/store/initialState.ts
@@ -1,9 +1,10 @@
 import { SeoConfig } from '@/types/config';
-import { LanguageModel } from '@/types/models';
+
+import { defaultModel } from '../../../common/models';
 
 export const DEFAULT_CONFIG: Partial<SeoConfig> = {
   concurrency: 5,
   entryExtension: '.mdx',
-  modelName: LanguageModel.GPT3_5,
+  modelName: defaultModel,
   temperature: 0,
 };
diff --git a/packages/lobe-seo/src/types/config.ts b/packages/lobe-seo/src/types/config.ts
index 6523d55..10548f7 100644
--- a/packages/lobe-seo/src/types/config.ts
+++ b/packages/lobe-seo/src/types/config.ts
@@ -1,4 +1,4 @@
-import { LanguageModel } from './models';
+import { LanguageModel } from '../../../common/models';
 
 export interface SeoConfig {
   /**
diff --git a/packages/lobe-seo/src/types/models.ts b/packages/lobe-seo/src/types/models.ts
deleted file mode 100644
index a6e6a12..0000000
--- a/packages/lobe-seo/src/types/models.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-export enum LanguageModel {
-  /**
-   * GPT 3.5 Turbo
-   */
-  GPT3_5 = 'gpt-3.5-turbo',
-  GPT3_5_1106 = 'gpt-3.5-turbo-1106',
-  GPT3_5_16K = 'gpt-3.5-turbo-16k',
-  /**
-   * GPT 4
-   */
-  GPT4 = 'gpt-4',
-  GPT4_32K = 'gpt-4-32k',
-  GPT4_PREVIEW = 'gpt-4-1106-preview',
-  GPT4_VISION_PREVIEW = 'gpt-4-vision-preview',
-}
-
-export const ModelTokens: Record<LanguageModel, number> = {
-  [LanguageModel.GPT3_5]: 4096,
-  [LanguageModel.GPT3_5_1106]: 16_385,
-  [LanguageModel.GPT3_5_16K]: 16_385,
-  [LanguageModel.GPT4]: 8196,
-  [LanguageModel.GPT4_PREVIEW]: 128_000,
-  [LanguageModel.GPT4_VISION_PREVIEW]: 128_000,
-  [LanguageModel.GPT4_32K]: 32_768,
-};