chat.rs

use std::error::Error;

use async_openai::types::ChatCompletionRequestAssistantMessageArgs;
use async_openai::types::ChatCompletionRequestMessage;
use async_openai::types::ChatCompletionRequestSystemMessageArgs;
use async_openai::types::ChatCompletionRequestUserMessageArgs;
use async_openai::types::CreateChatCompletionRequestArgs;
use async_openai::types::Role;
use async_openai::Client;
use chat_splitter::ChatSplitter;
use chat_splitter::IntoChatCompletionRequestMessage;

const MODEL: &str = "gpt-3.5-turbo";
const MAX_TOKENS: u16 = 1024;
const MAX_MESSAGES: usize = 16;
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn Error>> {
    // Get all previously stored chat messages...
    let mut stored_messages = get_stored_messages()?;

    // ...and append the most recent user message.
    stored_messages.push(
        ChatCompletionRequestUserMessageArgs::default()
            .role(Role::User)
            .content("Where was it played?")
            .build()?
            .into(),
    );
    assert!(stored_messages.len() > MAX_MESSAGES);

    // Split the history into outdated and recent messages, where the
    // recent ones always fit both the message and token limits.
    let (_outdated_messages, recent_messages) = ChatSplitter::new(MODEL)
        .max_tokens(MAX_TOKENS)
        .max_messages(MAX_MESSAGES)
        .split(&stored_messages);

    // Prepend the system message, then send only the recent messages.
    let mut messages = vec![ChatCompletionRequestSystemMessageArgs::default()
        .role(Role::System)
        .content("You are a helpful assistant.")
        .build()?
        .into()];
    messages.extend(recent_messages.iter().cloned());
    assert!(messages.len() <= MAX_MESSAGES + 1);

    let request = CreateChatCompletionRequestArgs::default()
        .model(MODEL)
        .max_tokens(MAX_TOKENS)
        .messages(messages)
        .build()?;
    let client = Client::new();
    let response = client.chat().create(request).await?;

    // Print the completion and append it to the stored history.
    println!("\nResponse:\n");
    for choice in response.choices {
        println!(
            "{}: Role: {} Content: {:?}",
            choice.index, choice.message.role, choice.message.content
        );
        stored_messages.push(choice.message.into_async_openai());
    }

    Ok(())
}

/// Simulate a long stored chat history by repeating the same exchange
/// until it is far too large for the model's context window.
fn get_stored_messages() -> Result<Vec<ChatCompletionRequestMessage>, Box<dyn Error>> {
    let mut messages = Vec::new();
    for _ in 0..2000 {
        messages.extend([
            ChatCompletionRequestUserMessageArgs::default()
                .role(Role::User)
                .content("Who won the world series in 2020?")
                .build()?
                .into(),
            ChatCompletionRequestAssistantMessageArgs::default()
                .role(Role::Assistant)
                .content("The Los Angeles Dodgers won the World Series in 2020.")
                .build()?
                .into(),
        ]);
    }
    Ok(messages)
}
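
A note on persistence: `get_stored_messages` above only fabricates an in-memory history for demonstration. In a real chat, the history would typically be loaded from and saved back to disk. Below is a minimal sketch, assuming a `serde_json` dependency and that async_openai's `ChatCompletionRequestMessage` derives serde's `Serialize` and `Deserialize` (true in recent releases, but worth verifying against the version you pin); the `messages.json` path and both helper names are purely illustrative, not part of the example above.

use std::error::Error;
use std::fs;
use std::path::Path;

use async_openai::types::ChatCompletionRequestMessage;

/// Load previously stored chat messages from a JSON file,
/// returning an empty history when the file does not exist yet.
/// (The JSON-file approach is a hypothetical choice for this sketch.)
fn load_stored_messages(path: &Path) -> Result<Vec<ChatCompletionRequestMessage>, Box<dyn Error>> {
    if !path.exists() {
        return Ok(Vec::new());
    }
    let json = fs::read_to_string(path)?;
    Ok(serde_json::from_str(&json)?)
}

/// Persist the complete chat history back to disk after each exchange.
fn save_stored_messages(
    path: &Path,
    messages: &[ChatCompletionRequestMessage],
) -> Result<(), Box<dyn Error>> {
    fs::write(path, serde_json::to_string_pretty(messages)?)?;
    Ok(())
}

With helpers like these, the call to `get_stored_messages()?` in `main` could become `load_stored_messages(Path::new("messages.json"))?`, with a matching `save_stored_messages` call after the response loop.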