forked from FloatTech/ZeroBot-Plugin
-
Notifications
You must be signed in to change notification settings - Fork 0
/
chatgpt.go
119 lines (103 loc) · 3.01 KB
/
chatgpt.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
package chatgpt
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"strconv"
"time"
)
const (
	// baseURL = "https://api.openai.com/v1/"

	// proxyURL is the OpenAI-compatible proxy base URL all chat requests go through
	// (used in place of the official baseURL above).
	proxyURL = "https://open.aiproxy.xyz/v1/"
	// modelGPT3Dot5Turbo is the default chat model name.
	modelGPT3Dot5Turbo = "gpt-3.5-turbo"
	// SECURITY NOTE(review): yunKey is an API key hardcoded in source control —
	// it should be moved to configuration or an environment variable.
	yunKey = "7d06a110e9e20a684e02934549db1d3d"
	// yunURL is a format string for the a20safe key-query endpoint; presumably
	// filled as fmt.Sprintf(yunURL, yunKey, <openai api key>) — verify against caller.
	yunURL = "https://api.a20safe.com/api.php?api=35&key=%s&apikey=%s"
)
// yun is the response envelope of the yunURL query API: a code/msg pair
// plus a list of records. Field names suggest the records describe key
// usage quotas — NOTE(review): semantics not visible in this file; confirm
// against the caller that decodes this type.
type yun struct {
	Code int    `json:"code"` // API status code
	Msg  string `json:"msg"`  // human-readable status message
	Data []struct {
		Return    string `json:"return"`
		Total     string `json:"total"`     // string-encoded number per the API
		Available string `json:"available"` // string-encoded number per the API
		Used      string `json:"used"`      // string-encoded number per the API
	} `json:"data"`
}
// chatGPTResponseBody is the response body of the chat completions endpoint.
type chatGPTResponseBody struct {
	ID      string       `json:"id"`      // response identifier assigned by the API
	Object  string       `json:"object"`  // object type, e.g. "chat.completion"
	Created int          `json:"created"` // creation time as a Unix timestamp
	Model   string       `json:"model"`   // model that produced the completion
	Choices []chatChoice `json:"choices"` // one entry per requested completion (request field N)
	Usage   chatUsage    `json:"usage"`   // token accounting for this request
}
// chatGPTRequestBody is the request body of the chat completions endpoint.
// All fields are omitted from the JSON when zero, letting the API apply
// its own defaults.
type chatGPTRequestBody struct {
	Model       string        `json:"model,omitempty"` // gpt-3.5-turbo
	Messages    []chatMessage `json:"messages,omitempty"`
	Temperature float64       `json:"temperature,omitempty"` // sampling temperature
	N           int           `json:"n,omitempty"`           // number of completions to generate
	MaxTokens   int           `json:"max_tokens,omitempty"`  // cap on generated tokens
}
// chatMessage is a single message in a chat conversation.
type chatMessage struct {
	Role    string `json:"role"`    // "system", "user", or "assistant"
	Content string `json:"content"` // message text
}
// chatChoice is one completion choice in a chat completions response.
type chatChoice struct {
	Index int `json:"index"` // position of this choice in the response
	// fix: explicit tag for consistency with the rest of the file; previously
	// this relied on encoding/json's case-insensitive default field matching.
	Message      chatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"` // e.g. "stop", "length"
}
// chatUsage reports token consumption for a chat completions request.
type chatUsage struct {
	PromptTokens     int `json:"prompt_tokens"`     // tokens in the input messages
	CompletionTokens int `json:"completion_tokens"` // tokens in the generated output
	TotalTokens      int `json:"total_tokens"`      // prompt + completion
}
// client is the shared HTTP client for all API calls. It honors the
// standard proxy environment variables (HTTP_PROXY/HTTPS_PROXY/NO_PROXY)
// and caps each request at 5 minutes end to end.
var client = &http.Client{
	Transport: &http.Transport{
		Proxy: http.ProxyFromEnvironment,
	},
	Timeout: time.Minute * 5,
}
// completions sends the conversation to the gpt-3.5 chat completions
// endpoint (via proxyURL) and returns the decoded response.
//
// Equivalent request:
//
//	curl https://api.openai.com/v1/chat/completions
//	  -H "Content-Type: application/json"
//	  -H "Authorization: Bearer YOUR_API_KEY"
//	  -d '{ "model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}'
//
// It returns an error on request-construction, transport, or decode
// failure, or when the HTTP status is outside [200, 400).
func completions(messages []chatMessage, apiKey string) (*chatGPTResponseBody, error) {
	// fix: the old `if com.Model == ""` check was always true (Model was
	// never set before it), so assign the default model directly.
	com := chatGPTRequestBody{
		Model:    modelGPT3Dot5Turbo,
		Messages: messages,
	}
	body, err := json.Marshal(com)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, proxyURL+"chat/completions", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/json; charset=utf-8")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey))
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Accept any 2xx/3xx status; everything else is reported with its code.
	if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusBadRequest {
		return nil, errors.New("response error: " + strconv.Itoa(res.StatusCode))
	}
	v := new(chatGPTResponseBody)
	// fix: pass the *chatGPTResponseBody itself, not &v (a **T) — the old
	// code only worked because encoding/json tolerates double pointers.
	if err = json.NewDecoder(res.Body).Decode(v); err != nil {
		return nil, err
	}
	return v, nil
}