-
Notifications
You must be signed in to change notification settings - Fork 21
/
+server.ts
98 lines (80 loc) · 2.57 KB
/
+server.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import { OPENAI_KEY } from '$env/static/private';
import type { CreateChatCompletionRequest, ChatCompletionRequestMessage } from 'openai';
import type { RequestHandler } from './$types';
import { getTokens } from '$lib/utils/tokenizer';
import { json } from '@sveltejs/kit';
import type { Config } from '@sveltejs/adapter-vercel';
// Run this endpoint on Vercel's Edge runtime.
export const config: Config = {
runtime: 'edge'
};
/**
 * POST handler: proxies a chat conversation to the OpenAI Chat Completions
 * API and streams the result back to the client.
 *
 * Expects a JSON body of shape `{ messages: ChatCompletionRequestMessage[] }`.
 * The most recent message is screened through the OpenAI moderation endpoint
 * before the completion request is issued.
 *
 * @returns a `text/event-stream` Response on success, or a JSON error body
 *          with status 500 on any failure (details are logged server-side
 *          only, never leaked to the client).
 */
export const POST: RequestHandler = async ({ request }) => {
	try {
		if (!OPENAI_KEY) {
			throw new Error('OPENAI_KEY env variable not set');
		}

		const requestData = await request.json();
		if (!requestData) {
			throw new Error('No request data');
		}

		const reqMessages: ChatCompletionRequestMessage[] = requestData.messages;
		if (!reqMessages) {
			throw new Error('no messages provided');
		}

		// Tally the token footprint of the incoming conversation so oversized
		// requests can be rejected before calling the completion API.
		let tokenCount = 0;
		reqMessages.forEach((msg) => {
			const tokens = getTokens(msg.content);
			tokenCount += tokens;
		});

		// Moderate only the newest message (earlier turns were presumably
		// screened when they were first submitted).
		const moderationRes = await fetch('https://api.openai.com/v1/moderations', {
			headers: {
				'Content-Type': 'application/json',
				Authorization: `Bearer ${OPENAI_KEY}`
			},
			method: 'POST',
			body: JSON.stringify({
				input: reqMessages[reqMessages.length - 1].content
			})
		});

		// FIX: the original read `.results` without checking the HTTP status,
		// so a failed moderation call surfaced as an opaque TypeError.
		if (!moderationRes.ok) {
			throw new Error(`Moderation request failed with status ${moderationRes.status}`);
		}

		const moderationData = await moderationRes.json();
		const [results] = moderationData.results;

		if (results.flagged) {
			throw new Error('Query flagged by openai');
		}

		const prompt =
			'You are a virtual assistant for a company called Huntabyte. Your name is Axel Smith';
		tokenCount += getTokens(prompt);

		// 4000 approximates the gpt-3.5-turbo context window (4096 tokens),
		// leaving headroom for the streamed reply.
		if (tokenCount >= 4000) {
			throw new Error('Query too large');
		}

		const messages: ChatCompletionRequestMessage[] = [
			{ role: 'system', content: prompt },
			...reqMessages
		];

		const chatRequestOpts: CreateChatCompletionRequest = {
			model: 'gpt-3.5-turbo',
			messages,
			temperature: 0.9,
			stream: true
		};

		const chatResponse = await fetch('https://api.openai.com/v1/chat/completions', {
			headers: {
				Authorization: `Bearer ${OPENAI_KEY}`,
				'Content-Type': 'application/json'
			},
			method: 'POST',
			body: JSON.stringify(chatRequestOpts)
		});

		if (!chatResponse.ok) {
			// FIX: `new Error(err)` on a parsed JSON object stringified to
			// "[object Object]", discarding the API's actual error payload.
			// Serialize it so the server log carries the real message.
			const err = await chatResponse.json();
			throw new Error(JSON.stringify(err));
		}

		// Pipe OpenAI's SSE stream straight through to the client unmodified.
		return new Response(chatResponse.body, {
			headers: {
				'Content-Type': 'text/event-stream'
			}
		});
	} catch (err) {
		// Log the detailed error internally; return only a generic message so
		// internal state (keys, API errors) is never exposed to the client.
		console.error(err);
		return json({ error: 'There was an error processing your request' }, { status: 500 });
	}
};