-
Notifications
You must be signed in to change notification settings - Fork 16
/
chatgpt.go
190 lines (171 loc) · 6.38 KB
/
chatgpt.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
package main
import (
"context"
"strings"
"github.com/PullRequestInc/go-gpt3"
"github.com/xyproto/env/v2"
"github.com/xyproto/mode"
"github.com/xyproto/vt100"
)
// continueGeneratingTokens controls when the ChatGPT token generation should stop
// (it is set to false when Esc is pressed, and checked by the streaming callback).
// NOTE(review): this is a plain bool written from the key loop and read from the
// streaming callback — presumably racy; consider sync/atomic.Bool. TODO confirm.
var continueGeneratingTokens bool
// ProgrammingLanguage reports whether the current editor mode appears to be a
// programming language, as opposed to a markup, config, data or plain text format.
func (e *Editor) ProgrammingLanguage() bool {
	switch e.mode {
	case mode.AIDL, mode.Amber, mode.Bazel, mode.Blank, mode.Config:
		return false
	case mode.Doc, mode.Email, mode.Git, mode.HIDL, mode.HTML:
		return false
	case mode.JSON, mode.Log, mode.M4, mode.ManPage, mode.Markdown:
		return false
	case mode.Nroff, mode.PolicyLanguage, mode.ReStructured, mode.Shader:
		return false
	case mode.SQL, mode.Text, mode.XML:
		return false
	default:
		return true
	}
}
// AIFixups inserts a space after the single-line comment marker when a generated
// line of code lacks one, leaving shebang lines and non-code modes untouched.
func (e *Editor) AIFixups(generatedLine string) string {
	marker := e.SingleLineCommentMarker()
	trimmed := strings.TrimSpace(generatedLine)
	// Only touch lines that look like comments in a programming language
	if len(trimmed) <= 2 || !e.ProgrammingLanguage() {
		return generatedLine
	}
	// Skip lines that are not comments, or that already have the space
	if !strings.HasPrefix(trimmed, marker) || strings.HasPrefix(trimmed, marker+" ") {
		return generatedLine
	}
	// Leave shebang lines (like "#!/bin/sh") alone
	if strings.HasPrefix(generatedLine, "#!") {
		return generatedLine
	}
	return strings.Replace(generatedLine, marker, marker+" ", 1)
}
// GenerateTokens uses the ChatGPT API to generate text. n is the maximum number of tokens.
// Each generated chunk of text is delivered through the newToken callback.
// The global bool "continueGeneratingTokens" controls when the text generation should stop;
// cancelling produces an error containing "context", which callers treat as a
// user-initiated stop rather than a failure.
func GenerateTokens(apiKey, prompt string, n int, temperature float32, model string, newToken func(string)) error {
	client := gpt3.NewClient(apiKey)
	chatContext, cancelFunction := context.WithCancel(context.Background())
	defer cancelFunction()
	err := client.CompletionStreamWithEngine(
		chatContext,
		model,
		gpt3.CompletionRequest{
			Prompt:      []string{prompt},
			MaxTokens:   gpt3.IntPtr(n),
			Temperature: gpt3.Float32Ptr(temperature),
		}, func(resp *gpt3.CompletionResponse) {
			// Guard against an empty Choices slice, to avoid an
			// index-out-of-range panic on a malformed streaming response
			if len(resp.Choices) > 0 {
				newToken(resp.Choices[0].Text)
			}
			// Stop generating when the user has pressed Esc
			if !continueGeneratingTokens {
				cancelFunction()
			}
		})
	return err
}
// countTokens gives a rough estimate of how many ChatGPT tokens the given string contains.
// TODO: Find an exact way to find the number of tokens in the prompt, from a ChatGPT point of view
func countTokens(s string) int {
	// Until the OpenAI API for counting tokens is used, estimate
	// 1.1 tokens per whitespace-separated word, plus a margin of 100.
	const (
		tokensPerWord = 1.1
		safetyMargin  = 100
	)
	wordCount := len(strings.Fields(s))
	return int(float64(wordCount)*tokensPerWord + safetyMargin)
}
// GenerateCodeOrText will try to generate and insert text at the current position in the editor, given a ChatGPT prompt.
// Tokens are streamed into the editor as they arrive; the user can press Esc (which
// clears the global continueGeneratingTokens flag) to stop generation early.
func (e *Editor) GenerateCodeOrText(c *vt100.Canvas, status *StatusBar, bookmark *Position, chatAPIKey, chatPrompt string) {
	if chatAPIKey == "" {
		status.SetErrorMessage("ChatGPT API key is empty")
		status.Show(c, e)
		return
	}
	// Strip away a single trailing exclamation mark and trim away spaces at the ends
	prompt := strings.TrimSpace(strings.TrimSuffix(chatPrompt, "!"))
	const (
		GENERATE_TEXT = iota
		GENERATE_CODE
		CONTINUE_CODE
	)
	generationType := GENERATE_TEXT // GENERATE_CODE // CONTINUE_CODE
	if e.ProgrammingLanguage() {
		generationType = GENERATE_CODE
		if prompt == "" {
			// An empty prompt in a programming language means "continue this code"
			generationType = CONTINUE_CODE
		}
	}
	// Determine the temperature (text gets 0.8; code uses the zero value for more deterministic output)
	var defaultTemperature float32
	switch generationType {
	case GENERATE_TEXT:
		defaultTemperature = 0.8
	}
	temperature := env.Float32("CHATGPT_TEMPERATURE", defaultTemperature)
	// Select a model
	gptModel, gptModelTokens := gpt3.TextDavinci003Engine, 4000
	//gptModel, gptModelTokens := "text-curie-001", 2048 // simpler and faster
	//gptModel, gptModelTokens := "text-ada-001", 2048 // even simpler and even faster
	switch generationType {
	case CONTINUE_CODE:
		gptModel, gptModelTokens = "code-davinci-002", 8000
		//gptModel, gptModelTokens = "code-cushman-001", 2048 // slightly simpler and slightly faster
	}
	// Prefix the prompt with instructions that depend on the generation type
	switch generationType {
	case GENERATE_TEXT:
		prompt += ". Write it in " + e.mode.String() + ". It should be expertly written, concise and correct."
	case GENERATE_CODE:
		prompt += ". Write it in " + e.mode.String() + " and include comments where it makes sense. The code should be concise, correct and expertly created. Comments above functions should start with the function name."
	case CONTINUE_CODE:
		initialPrompt := "Write the next 10 lines of this " + e.mode.String() + " program:\n"
		// gather as many trailing tokens/fields from the current file as the model allows and use that as the prompt
		startTokens := strings.Fields(e.String())
		gatherNTokens := gptModelTokens - countTokens(initialPrompt)
		if len(startTokens) > gatherNTokens {
			// Keep only the last gatherNTokens fields, so the prompt ends at the current end of the file
			startTokens = startTokens[len(startTokens)-gatherNTokens:]
		}
		prompt = strings.Join(startTokens, " ")
	}
	// Set a suitable status bar text
	status.ClearAll(c)
	switch generationType {
	case GENERATE_TEXT:
		status.SetMessage("Generating text...")
	case GENERATE_CODE:
		status.SetMessage("Generating code...")
	case CONTINUE_CODE:
		status.SetMessage("Continuing code...")
	}
	status.Show(c, e)
	// Find the maxTokens value that will be sent to the OpenAI API
	amountOfPromptTokens := countTokens(prompt)
	maxTokens := gptModelTokens - amountOfPromptTokens // The user can press Esc when there are enough tokens
	if maxTokens < 1 {
		status.SetErrorMessage("ChatGPT API request is too long")
		status.Show(c, e)
		return
	}
	// Start generating the code/text while inserting words into the editor as it happens
	currentLeadingWhitespace := e.LeadingWhitespace()
	continueGeneratingTokens = true // global
	first := true
	var generatedLine string
	if err := GenerateTokens(chatAPIKey, prompt, maxTokens, temperature, gptModel, func(word string) {
		// Accumulate streamed tokens into the current line until a newline arrives
		generatedLine += word
		if strings.HasSuffix(generatedLine, "\n") {
			e.SetCurrentLine(currentLeadingWhitespace + e.AIFixups(generatedLine))
			if !first {
				if !e.EmptyTrimmedLine() {
					e.InsertLineBelow()
					e.pos.sy++
				}
			} else {
				// Replace the line the generation started on, keeping the bookmark valid
				e.DeleteCurrentLineMoveBookmark(bookmark)
				first = false
			}
			generatedLine = ""
		} else {
			// No newline yet: redraw the partial line in place
			e.SetCurrentLine(currentLeadingWhitespace + e.AIFixups(generatedLine))
		}
		// "refresh"
		e.DrawLines(c, true, false)
	}); err != nil {
		errorMessage := err.Error()
		// Errors containing "context" come from the user cancelling with Esc, and are not real failures
		if !strings.Contains(errorMessage, "context") {
			e.End(c)
			status.SetError(err)
			status.Show(c, e)
			return
		}
	}
	e.End(c)
	if continueGeneratingTokens { // global
		if first { // Nothing was generated
			status.SetMessageAfterRedraw("Nothing was generated")
		} else {
			status.SetMessageAfterRedraw("Done")
		}
	} else {
		status.SetMessageAfterRedraw("Stopped")
	}
	e.RedrawAtEndOfKeyLoop(c, status)
}