forked from mckaywrigley/chatbot-ui
/
openai.ts
53 lines (49 loc) · 1.23 KB
/
openai.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import { OPENAI_API_TYPE } from '../utils/app/const';
/**
 * Metadata describing one selectable chat model.
 * Instances live in the `OpenAIModels` map below, keyed by `OpenAIModelID`.
 */
export interface OpenAIModel {
  /** Wire identifier for the model; values come from `OpenAIModelID`. */
  id: string;
  /** Display label (e.g. 'GPT-3.5') — presumably shown in the model picker UI; confirm against callers. */
  name: string;
  maxLength: number; // maximum length of a message
  /** NOTE(review): looks like the model's context-window budget in tokens — confirm how callers use it. */
  tokenLimit: number;
}
/**
 * Canonical identifiers for every supported model.
 * The string value is the `id` recorded in `OpenAIModels` for that model.
 */
export enum OpenAIModelID {
  GPT_3_5 = 'gpt-3.5-turbo',
  // Dot-less variant — presumably the Azure OpenAI deployment name for the
  // same model (see `OPENAI_API_TYPE` import); TODO confirm.
  GPT_3_5_AZ = 'gpt-35-turbo',
  GPT_4 = 'gpt-4',
  GPT_4_32K = 'gpt-4-32k',
  // Non-OpenAI model added by this fork.
  LLAMA = 'llama',
}
/**
 * Model to fall back on when the `DEFAULT_MODEL` environment variable is
 * unset or names a model this app does not support.
 */
export const fallbackModelID: OpenAIModelID = OpenAIModelID.GPT_3_5;
/**
 * Per-model metadata for every `OpenAIModelID`.
 *
 * Uses `satisfies` instead of a `Record<...>` type annotation: the compiler
 * still verifies that every enum member has an entry and each entry matches
 * `OpenAIModel`, but the literal types of each entry are preserved rather
 * than widened (e.g. `OpenAIModels[OpenAIModelID.GPT_4].id` stays the
 * literal enum member instead of plain `string`).
 */
export const OpenAIModels = {
  [OpenAIModelID.GPT_3_5]: {
    id: OpenAIModelID.GPT_3_5,
    name: 'GPT-3.5',
    maxLength: 12000,
    tokenLimit: 4000,
  },
  [OpenAIModelID.GPT_3_5_AZ]: {
    id: OpenAIModelID.GPT_3_5_AZ,
    name: 'GPT-3.5',
    maxLength: 12000,
    tokenLimit: 4000,
  },
  [OpenAIModelID.GPT_4]: {
    id: OpenAIModelID.GPT_4,
    name: 'GPT-4',
    maxLength: 24000,
    tokenLimit: 8000,
  },
  [OpenAIModelID.GPT_4_32K]: {
    id: OpenAIModelID.GPT_4_32K,
    name: 'GPT-4-32K',
    maxLength: 96000,
    tokenLimit: 32000,
  },
  // our custom models
  [OpenAIModelID.LLAMA]: {
    id: OpenAIModelID.LLAMA,
    name: 'Llama-compatible LLM',
    maxLength: 12000,
    tokenLimit: 4000,
  },
} satisfies Record<OpenAIModelID, OpenAIModel>;