This repository has been archived by the owner on Feb 2, 2024. It is now read-only.
generated from vercel/ai-chatbot
-
Notifications
You must be signed in to change notification settings - Fork 2
/
route.ts
145 lines (126 loc) · 3.51 KB
/
route.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
import { kv } from '@vercel/kv'
import { Message, OpenAIStream, StreamingTextResponse } from 'ai'
import { Configuration, OpenAIApi } from 'openai-edge'
import { auth } from '@/auth'
import { nanoid } from '@/lib/utils'
import { Langfuse } from 'langfuse'
import { type ChatCompletionRequestMessage } from 'openai-edge/types/types/chat'
// Force the Vercel Edge runtime; required for low-latency streaming responses.
export const runtime = 'edge'

// Module-level OpenAI client, configured from the server-side API key.
// Shared across all requests handled by this isolate.
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY
})
const openai = new OpenAIApi(configuration)

// Langfuse observability client. Keys fall back to '' so module
// initialization never throws when the env vars are unset; the
// NEXT_PUBLIC_* variables are also exposed to the browser bundle.
const langfuse = new Langfuse({
  secretKey: process.env.LANGFUSE_SECRET_KEY ?? '',
  publicKey: process.env.NEXT_PUBLIC_LANGFUSE_PUBLIC_KEY ?? '',
  baseUrl: process.env.NEXT_PUBLIC_LANGFUSE_BASE_URL
})
export async function POST(req: Request) {
const json = await req.json()
const { messages, previewToken } = json
const { id: userId, email: userEmail } = (await auth())?.user
const chatId = json.id ?? nanoid()
// Exclude additional fields from being sent to OpenAI
const openAiMessages = (messages as Message[]).map(({ content, role }) => ({
content,
role: role
}))
if (!userId) {
return new Response('Unauthorized', {
status: 401
})
}
if (previewToken) {
configuration.apiKey = previewToken
}
const trace = langfuse.trace({
name: 'chat',
id: `lf-ai-chat:${chatId}`,
metadata: {
userEmail
},
userId: `user:${userId}`
})
const lfGeneration = trace.generation({
name: 'chat',
prompt: openAiMessages as any, // TODO: fix SDK types
model: 'gpt-3.5-turbo',
modelParameters: {
temperature: 0.7
}
})
// Use the langfuse generation ID as the message ID
// Alternatively, trace.generation also accepts id as an argument if you want to use your own message id
const messageId = lfGeneration.id
const res = await openai.createChatCompletion({
model: 'gpt-3.5-turbo',
messages: openAiMessages as ChatCompletionRequestMessage[], // Inconsistent type definitions of ai SDK, missing "function" role
temperature: 0.7,
stream: true
})
const stream = OpenAIStream(res, {
async onStart() {
lfGeneration.update({
completionStartTime: new Date()
})
},
async onCompletion(completion) {
lfGeneration.update({
endTime: new Date(),
completion
})
const title = json.messages[0].content.substring(0, 100)
const createdAt = Date.now()
const path = `/chat/${chatId}`
const payload = {
id: chatId,
title,
userId,
createdAt,
path,
messages: [
...messages,
{
content: completion,
role: 'assistant',
id: messageId
}
]
}
await kv.hmset(`chat:${chatId}`, payload)
lfGeneration.event({
startTime: new Date(),
name: 'kv-hmset',
level: 'DEBUG',
input: {
key: `chat:${chatId}`,
...payload
}
})
await kv.zadd(`user:chat:${userId}`, {
score: createdAt,
member: `chat:${chatId}`
})
lfGeneration.event({
startTime: new Date(),
name: 'kv-zadd',
level: 'DEBUG',
input: {
key: `user:chat:${userId}`,
score: createdAt,
member: `chat:${chatId}`
}
})
try {
await langfuse.shutdownAsync()
} catch (e) {
console.error(JSON.stringify(e))
}
}
})
return new StreamingTextResponse(stream, {
headers: {
'X-Message-Id': messageId
}
})
}