-
Notifications
You must be signed in to change notification settings - Fork 255
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Introduce chat mode, refactor API (#32)
This commit introduces major changes to the library: 1. Chat mode is introduced, allowing conversing with the API in chat models, asking the API to modify previously generated code. After a response is returned, users can hit the "c" key to send further instructions to the API. 2. The `--full` flag is removed in favor of `--readme-file`. This allows storing both the code, and the full output, as separate files, rather than one or the other. The `--readme-file` is optional, the full output will not be saved if not provided. 3. The command line flags are simplified. The `-s` flag is removed. The `-q` flag now enables non-interactive mode, but will still honor the `--output-file` and `--readme-file` flags. 4. More models are now supported. The output from the `list-models` command is now returned in tabular form, with more information. Every model now has its own maximum tokens value. The code-davinci-002 model is removed as it is about to be removed by OpenAI as well. 5. The library now includes separate methods for completion models and chat models. The former use the `Complete` method, the latter use the `Chat` method, with one or more `Send` method calls on the resulting object. The previous `GenerateCode` method still exists as a simple wrapper around these two. 6. The signature of the `GenerateCode` method is changed. It now requires the model to use (whereas previously it was a Client attribute). Instead of simply returning the generated code and an error, it returns a Response object that contains the generated code, the full output, the API key used, and the number of tokens utilized by the request. 7. Requests to the API now send a temperature value of 0.2 for more deterministic responses. 8. A `version` command is added to the CLI that prints the `aiac` version. 9. The README file is updated with a new demo and updated instructions, including how to use aiac as a library.
Due to the backwards-incompatible changes in this commit, the major version of the project is bumped to 3.
- Loading branch information
Showing
13 changed files
with
659 additions
and
348 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1 +1,2 @@ | ||
aiac | ||
.env* |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Binary file not shown.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
package libaiac | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"strings" | ||
) | ||
|
||
// Conversation is a struct used to converse with an OpenAI chat model. It
// maintains all messages sent/received in order to maintain context just like
// using ChatGPT.
type Conversation struct {
	client   *Client   // client used to issue API requests
	model    Model     // chat model this conversation is bound to
	messages []message // full message history, in order; sent with every request
}
|
||
// message is a single entry in a chat conversation, matching the shape of
// message objects in OpenAI's chat completions API.
type message struct {
	// Role is the author of the message, e.g. "user" for prompts; responses
	// from the API carry the role assigned by the server.
	Role string `json:"role"`
	// Content is the text of the message.
	Content string `json:"content"`
}
|
||
// chatResponse represents the subset of OpenAI's chat completions API
// response body that this library consumes.
type chatResponse struct {
	Choices []struct {
		Message      message `json:"message"`       // the generated reply
		Index        int64   `json:"index"`         // position of this choice
		FinishReason string  `json:"finish_reason"` // "stop" means output completed normally
	} `json:"choices"`
	Usage struct {
		TotalTokens int64 `json:"total_tokens"` // tokens consumed by the request
	} `json:"usage"`
}
|
||
// Chat initiates a conversation with an OpenAI chat model. A conversation | ||
// maintains context, allowing to send further instructions to modify the output | ||
// from previous requests, just like using the ChatGPT website. | ||
func (client *Client) Chat(model Model) *Conversation { | ||
if model.Type != ModelTypeChat { | ||
return nil | ||
} | ||
|
||
return &Conversation{ | ||
client: client, | ||
model: model, | ||
} | ||
} | ||
|
||
// Send sends the provided message to the API and returns a Response object. | ||
// To maintain context, all previous messages (whether from you to the API or | ||
// vice-versa) are sent as well, allowing you to ask the API to modify the | ||
// code it already generated. | ||
func (conv *Conversation) Send(ctx context.Context, prompt string) ( | ||
res Response, | ||
err error, | ||
) { | ||
var answer chatResponse | ||
|
||
conv.messages = append(conv.messages, message{ | ||
Role: "user", | ||
Content: prompt, | ||
}) | ||
|
||
err = conv.client.NewRequest("POST", "/chat/completions"). | ||
JSONBody(map[string]interface{}{ | ||
"model": conv.model.Name, | ||
"messages": conv.messages, | ||
"temperature": 0.2, | ||
}). | ||
Into(&answer). | ||
RunContext(ctx) | ||
if err != nil { | ||
return res, fmt.Errorf("failed sending prompt: %w", err) | ||
} | ||
|
||
if len(answer.Choices) == 0 { | ||
return res, ErrNoResults | ||
} | ||
|
||
if answer.Choices[0].FinishReason != "stop" { | ||
return res, fmt.Errorf( | ||
"%w: %s", | ||
ErrResultTruncated, | ||
answer.Choices[0].FinishReason, | ||
) | ||
} | ||
|
||
conv.messages = append(conv.messages, answer.Choices[0].Message) | ||
|
||
res.FullOutput = strings.TrimSpace(answer.Choices[0].Message.Content) | ||
res.APIKeyUsed = conv.client.apiKey | ||
res.TokensUsed = answer.Usage.TotalTokens | ||
|
||
var ok bool | ||
if res.Code, ok = ExtractCode(res.FullOutput); !ok { | ||
res.Code = res.FullOutput | ||
} | ||
|
||
return res, nil | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
package libaiac | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"strings" | ||
) | ||
|
||
// completionResponse represents the subset of OpenAI's completions API
// response body that this library consumes.
type completionResponse struct {
	Choices []struct {
		Text         string `json:"text"`          // the generated completion text
		Index        int64  `json:"index"`         // position of this choice
		FinishReason string `json:"finish_reason"` // "stop" means output completed normally
	} `json:"choices"`
	Usage struct {
		TotalTokens int64 `json:"total_tokens"` // tokens consumed by the request
	} `json:"usage"`
}
|
||
// Complete sends a request to OpenAI's Completions API using the provided model | ||
// and prompt, and returns the response | ||
func (client *Client) Complete( | ||
ctx context.Context, | ||
model Model, | ||
prompt string, | ||
) (res Response, err error) { | ||
var answer completionResponse | ||
|
||
err = client.NewRequest("POST", "/completions"). | ||
JSONBody(map[string]interface{}{ | ||
"model": model.Name, | ||
"prompt": prompt, | ||
"max_tokens": model.MaxTokens - len(prompt), | ||
"temperature": 0.2, | ||
}). | ||
Into(&answer). | ||
RunContext(ctx) | ||
if err != nil { | ||
return res, fmt.Errorf("failed sending prompt: %w", err) | ||
} | ||
|
||
if len(answer.Choices) == 0 { | ||
return res, ErrNoResults | ||
} | ||
|
||
if answer.Choices[0].FinishReason != "stop" { | ||
return res, fmt.Errorf( | ||
"%w: %s", | ||
ErrResultTruncated, | ||
answer.Choices[0].FinishReason, | ||
) | ||
} | ||
|
||
res.FullOutput = strings.TrimSpace(answer.Choices[0].Text) | ||
res.APIKeyUsed = client.apiKey | ||
res.TokensUsed = answer.Usage.TotalTokens | ||
|
||
var ok bool | ||
if res.Code, ok = ExtractCode(res.FullOutput); !ok { | ||
res.Code = res.FullOutput | ||
} | ||
|
||
return res, nil | ||
} |
Oops, something went wrong.