Commit

refactors (#91)
* cargo fmt

* remove colon

* add function call stream in makefile
64bit committed Jul 30, 2023
1 parent e120962 commit d9a201a
Showing 3 changed files with 67 additions and 67 deletions.
2 changes: 1 addition & 1 deletion async-openai/README.md
@@ -105,7 +105,7 @@ Thank you for your time to contribute and improve the project, I'd be happy to h

 A good starting point would be existing [open issues](https://github.com/64bit/async-openai/issues).
 
-## Complimentary Crates:
+## Complimentary Crates
 - [openai-func-enums](https://github.com/frankfralick/openai-func-enums) provides procedural macros that make it easier to use this library with OpenAI API's function calling feature. It also provides derive macros you can add to existing [clap](https://github.com/clap-rs/clap) application subcommands for natural language use of command line tools.
 
 ## License
1 change: 1 addition & 0 deletions examples/Makefile.toml
@@ -15,6 +15,7 @@ cd create-image-edit && cargo run && cd -
 cd create-image-variation && cargo run && cd -
 cd embeddings && cargo run && cd -
 cd function-call && cargo run && cd -
+cd function-call-stream && cargo run && cd -
 cd models && cargo run && cd -
 cd moderations && cargo run && cd -
 #cd rate-limit-completions && cargo run && cd -
131 changes: 65 additions & 66 deletions examples/function-call-stream/src/main.rs
@@ -1,6 +1,6 @@
-use std::io::{stdout, Write};
 use std::collections::HashMap;
 use std::error::Error;
+use std::io::{stdout, Write};

 use async_openai::{
     types::{
@@ -13,7 +13,6 @@ use async_openai::{
 use futures::StreamExt;
 use serde_json::json;
 
-
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
@@ -44,80 +43,80 @@ async fn main() -> Result<(), Box<dyn Error>> {

(The remainder of this hunk is the formatting-only `cargo fmt` re-indentation named in the commit message; the resulting code:)
        .build()?;

    // the first response from GPT is just the json response containing the function that was called
    // and the model-generated arguments for that function (don't stream this)
    let response = client
        .chat()
        .create(request)
        .await?
        .choices
        .get(0)
        .unwrap()
        .message
        .clone();

    if let Some(function_call) = response.function_call {
        let mut available_functions: HashMap<&str, fn(&str, &str) -> serde_json::Value> =
            HashMap::new();
        available_functions.insert("get_current_weather", get_current_weather);

        let function_name = function_call.name;
        let function_args: serde_json::Value = function_call.arguments.parse().unwrap();

        let location = function_args["location"].as_str().unwrap();
        let unit = "fahrenheit"; // why doesn't the model return a unit argument?
        let function = available_functions.get(function_name.as_str()).unwrap();
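        // `available_functions` maps the model-chosen name to a local Rust function,
        // so supporting more callable functions only means inserting more entries.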
        let function_response = function(location, unit); // call the function

        let message = vec![
            ChatCompletionRequestMessageArgs::default()
                .role(Role::User)
                .content("What's the weather like in Boston?")
                .build()?,
            ChatCompletionRequestMessageArgs::default()
                .role(Role::Function)
                .content(function_response.to_string())
                .name(function_name)
                .build()?,
        ];
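        // The Role::Function message above returns the function's JSON output to the
        // model; its `name` field identifies which function produced the result.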

        let request = CreateChatCompletionRequestArgs::default()
            .max_tokens(512u16)
            .model("gpt-3.5-turbo-0613")
            .messages(message)
            .build()?;

        // Now stream received response from model, which essentially formats the function response
        let mut stream = client.chat().create_stream(request).await?;

        let mut lock = stdout().lock();
        while let Some(result) = stream.next().await {
            match result {
                Ok(response) => {
                    response.choices.iter().for_each(|chat_choice| {
                        if let Some(ref content) = chat_choice.delta.content {
                            write!(lock, "{}", content).unwrap();
                        }
                    });
                }
                Err(err) => {
                    writeln!(lock, "error: {err}").unwrap();
                }
            }
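            // Flush after each streamed chunk so partial output appears immediately.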
            stdout().flush()?;
        }
        println!("{}", "\n");
    }

    Ok(())
}

fn get_current_weather(location: &str, unit: &str) -> serde_json::Value {
    let weather_info = json!({
        "location": location,
        "temperature": "72",
        "unit": unit,
        "forecast": ["sunny", "windy"]
    });

    weather_info
}
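
For reference, the collapsed region between the import hunk and the big hunk is where the first request — including the function specification the model may call — is built. A minimal sketch of how that construction typically looks in async-openai examples of this era, assuming `ChatCompletionFunctionsArgs` is imported from `async_openai::types` alongside the other types; the schema and description strings are illustrative assumptions, not text from this diff:

    // Hypothetical reconstruction for illustration only; the parameter schema and
    // description strings are assumptions, not taken from the collapsed diff lines.
    let request = CreateChatCompletionRequestArgs::default()
        .max_tokens(512u16)
        .model("gpt-3.5-turbo-0613")
        .messages([ChatCompletionRequestMessageArgs::default()
            .role(Role::User)
            .content("What's the weather like in Boston?")
            .build()?])
        .functions([ChatCompletionFunctionsArgs::default()
            .name("get_current_weather")
            .description("Get the current weather in a given location")
            .parameters(json!({
                "type": "object",
                "properties": {
                    "location": { "type": "string", "description": "City and state, e.g. Boston, MA" },
                    "unit": { "type": "string", "enum": ["celsius", "fahrenheit"] }
                },
                "required": ["location"]
            }))
            .build()?])
        .function_call("auto")
        .build()?;

To try the example, note that `Client::new()` reads the OPENAI_API_KEY environment variable, and `cargo run` from examples/function-call-stream — now also wired into the examples Makefile above — exercises both the unstreamed function call and the streamed follow-up.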
