-
Notifications
You must be signed in to change notification settings - Fork 3
/
app.js
82 lines (75 loc) · 2.68 KB
/
app.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
const http = require('http');
const https = require('https');
const url = require('url'); // was `var` — const-by-default
const { createLogger, format, transports } = require('winston');
const { combine, timestamp, label, json } = format;

// Structured JSON logger writing to openai.log; every entry is tagged
// with the service name via defaultMeta for downstream filtering.
const logger = createLogger({
  level: 'info',
  format: combine(
    timestamp(),
    json()
  ),
  defaultMeta: { service: 'openai-client-service' },
  transports: [
    new transports.File({ filename: 'openai.log' }),
  ],
});

// OpenAI client — API key comes from the environment, never hard-coded.
const { Configuration, OpenAIApi } = require("openai");
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY
});
const openai = new OpenAIApi(configuration);

// Local HTTP front-end bind address/port (see server.listen below).
const hostname = '127.0.0.1';
const port = 8090;
/**
 * Pushes token-usage metrics from an OpenAI completion response to the
 * local metrics ingest endpoint (plain-text line protocol, one metric
 * per line). Fire-and-forget: ingest failures are logged, never thrown.
 *
 * @param {object} openai_response - `response.data` of a completion call;
 *   assumed to carry `.model` and `.usage.{prompt,completion,total}_tokens`
 *   — TODO confirm against the OpenAI SDK version in use.
 */
function report_metric(openai_response) {
  const { model, usage } = openai_response;
  // NOTE(review): fixed metric-key typo "promt_token_count" ->
  // "prompt_token_count"; update any dashboards keyed on the old name.
  const post_data =
    `openai.prompt_token_count,model=${model} ${usage.prompt_tokens}\n` +
    `openai.completion_token_count,model=${model} ${usage.completion_tokens}\n` +
    `openai.total_token_count,model=${model} ${usage.total_tokens}\n`;
  const post_options = {
    host: 'localhost',
    port: '14499',
    path: '/metrics/ingest',
    method: 'POST',
    headers: {
      'Content-Type': 'text/plain',
      'Content-Length': Buffer.byteLength(post_data)
    }
  };
  const metric_req = http
    .request(post_options, (resp) => {
      // Drain the (ignored) response body so the socket is released;
      // the original empty callback left the stream paused.
      resp.resume();
    })
    .on('error', (err) => {
      logger.log('error', `OpenAI error ${err}`);
    });
  metric_req.write(post_data);
  metric_req.end();
}
// HTTP front-end: GET /?prompt=... forwards the prompt to the OpenAI
// completion API and returns the raw completion text; anything else
// gets a friendly 200 placeholder.
const server = http.createServer(async (req, res) => {
  // Parse once — the original re-ran url.parse() three times per request.
  const parsed = url.parse(req.url, true);
  const params = parsed.query;
  logger.log('info', `endpoint called ${parsed.pathname}`);
  if (parsed.pathname == '/' && params.prompt) {
    try {
      const response = await openai.createCompletion({
        model: "text-davinci-003",
        prompt: params.prompt,
        temperature: 0,   // deterministic completions
        max_tokens: 10,   // keep responses (and token spend) small
      });
      const completion = response.data.choices[0].text;
      report_metric(response.data);
      // Log token usage alongside the metrics push (typo "promt" fixed).
      logger.log('info', `OpenAI response prompt_tokens:${response.data.usage.prompt_tokens} completion_tokens:${response.data.usage.completion_tokens} total_tokens:${response.data.usage.total_tokens}`);
      res.statusCode = 200;
      res.setHeader('Content-Type', 'text/plain');
      res.end(completion);
    } catch (error) {
      // Bug fix: the failure was previously swallowed silently — the
      // client saw a bare 500 and nothing reached the log.
      logger.log('error', `OpenAI request failed ${error}`);
      res.statusCode = 500;
      res.setHeader('Content-Type', 'text/plain');
      res.end("Error");
    }
  } else {
    res.statusCode = 200;
    res.setHeader('Content-Type', 'text/plain');
    res.end("You asked for nothing!");
  }
});
// Bind explicitly to `hostname` so the server only listens where the
// startup log says it does — `listen(port, cb)` alone binds ALL
// interfaces, exposing the service beyond the advertised 127.0.0.1.
server.listen(port, hostname, () => {
  logger.log('info', `Server running at http://${hostname}:${port}/`);
});