From d9a201a385b14748641643b98a2ebbc8418e48fe Mon Sep 17 00:00:00 2001
From: Himanshu Neema
Date: Sun, 30 Jul 2023 14:15:10 -0700
Subject: [PATCH] refactors (#91)

* cargo fmt

* remove colon

* add function call stream in makefile
---
 async-openai/README.md                    |   2 +-
 examples/Makefile.toml                    |   1 +
 examples/function-call-stream/src/main.rs | 131 +++++++++++-----------
 3 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/async-openai/README.md b/async-openai/README.md
index 7ae42a4b..d8db0ca3 100644
--- a/async-openai/README.md
+++ b/async-openai/README.md
@@ -105,7 +105,7 @@ Thank you for your time to contribute and improve the project, I'd be happy to h
 
 A good starting point would be existing [open issues](https://github.com/64bit/async-openai/issues).
 
-## Complimentary Crates:
+## Complimentary Crates
 - [openai-func-enums](https://github.com/frankfralick/openai-func-enums) provides procedural macros that make it easier to use this library with OpenAI API's function calling feature. It also provides derive macros you can add to existing [clap](https://github.com/clap-rs/clap) application subcommands for natural language use of command line tools.
 
 ## License
diff --git a/examples/Makefile.toml b/examples/Makefile.toml
index ab3e00d0..eccb8897 100644
--- a/examples/Makefile.toml
+++ b/examples/Makefile.toml
@@ -15,6 +15,7 @@ cd create-image-edit && cargo run && cd -
 cd create-image-variation && cargo run && cd -
 cd embeddings && cargo run && cd -
 cd function-call && cargo run && cd -
+cd function-call-stream && cargo run && cd -
 cd models && cargo run && cd -
 cd moderations && cargo run && cd -
 #cd rate-limit-completions && cargo run && cd -
diff --git a/examples/function-call-stream/src/main.rs b/examples/function-call-stream/src/main.rs
index 509b92d2..ffe8cce6 100644
--- a/examples/function-call-stream/src/main.rs
+++ b/examples/function-call-stream/src/main.rs
@@ -1,6 +1,6 @@
-use std::io::{stdout, Write};
 use std::collections::HashMap;
 use std::error::Error;
+use std::io::{stdout, Write};
 
 use async_openai::{
     types::{
@@ -13,7 +13,6 @@ use async_openai::{
 use futures::StreamExt;
 use serde_json::json;
 
-
 #[tokio::main]
 async fn main() -> Result<(), Box<dyn Error>> {
     let client = Client::new();
@@ -44,80 +43,80 @@ async fn main() -> Result<(), Box<dyn Error>> {
         .build()?;
 
     // the first response from GPT is just the json response containing the function that was called
-    // and the model-generated arguments for that function (don't stream this) 
+    // and the model-generated arguments for that function (don't stream this)
     let response = client
-    .chat()
-    .create(request)
-    .await?
-    .choices
-    .get(0)
-    .unwrap()
-    .message
-    .clone();
-
-    if let Some(function_call) = response.function_call {
-    let mut available_functions: HashMap<&str, fn(&str, &str) -> serde_json::Value> =
-        HashMap::new();
-    available_functions.insert("get_current_weather", get_current_weather);
-
-    let function_name = function_call.name;
-    let function_args: serde_json::Value = function_call.arguments.parse().unwrap();
+        .chat()
+        .create(request)
+        .await?
+        .choices
+        .get(0)
+        .unwrap()
+        .message
+        .clone();
 
-    let location = function_args["location"].as_str().unwrap();
-    let unit = "fahrenheit"; // why doesn't the model return a unit argument?
-    let function = available_functions.get(function_name.as_str()).unwrap();
+    if let Some(function_call) = response.function_call {
+        let mut available_functions: HashMap<&str, fn(&str, &str) -> serde_json::Value> =
+            HashMap::new();
+        available_functions.insert("get_current_weather", get_current_weather);
+
+        let function_name = function_call.name;
+        let function_args: serde_json::Value = function_call.arguments.parse().unwrap();
+
+        let location = function_args["location"].as_str().unwrap();
+        let unit = "fahrenheit"; // why doesn't the model return a unit argument?
+        let function = available_functions.get(function_name.as_str()).unwrap();
         let function_response = function(location, unit); // call the function
 
-    let message = vec![
-        ChatCompletionRequestMessageArgs::default()
-            .role(Role::User)
-            .content("What's the weather like in Boston?")
-            .build()?,
-        ChatCompletionRequestMessageArgs::default()
-            .role(Role::Function)
-            .content(function_response.to_string())
-            .name(function_name)
-            .build()?
-    ];
+        let message = vec![
+            ChatCompletionRequestMessageArgs::default()
+                .role(Role::User)
+                .content("What's the weather like in Boston?")
+                .build()?,
+            ChatCompletionRequestMessageArgs::default()
+                .role(Role::Function)
+                .content(function_response.to_string())
+                .name(function_name)
+                .build()?,
+        ];
+
+        let request = CreateChatCompletionRequestArgs::default()
+            .max_tokens(512u16)
+            .model("gpt-3.5-turbo-0613")
+            .messages(message)
+            .build()?;
 
-    let request = CreateChatCompletionRequestArgs::default()
-        .max_tokens(512u16)
-        .model("gpt-3.5-turbo-0613")
-        .messages(message)
-        .build()?;
-
-    // Now stream received response from model, which essentially formats the function response
-    let mut stream = client.chat().create_stream(request).await?;
+        // Now stream received response from model, which essentially formats the function response
+        let mut stream = client.chat().create_stream(request).await?;
 
-  let mut lock = stdout().lock();
-  while let Some(result) = stream.next().await {
-    match result {
-      Ok(response) => {
-        response.choices.iter().for_each(|chat_choice| {
-          if let Some(ref content) = chat_choice.delta.content {
-            write!(lock, "{}", content).unwrap();
-          }
-        });
-      }
-      Err(err) => {
-        writeln!(lock, "error: {err}").unwrap();
-      }
-    }
-    stdout().flush()?;
-  }
-  println!("{}", "\n");
-  }
+        let mut lock = stdout().lock();
+        while let Some(result) = stream.next().await {
+            match result {
+                Ok(response) => {
+                    response.choices.iter().for_each(|chat_choice| {
+                        if let Some(ref content) = chat_choice.delta.content {
+                            write!(lock, "{}", content).unwrap();
+                        }
+                    });
+                }
+                Err(err) => {
+                    writeln!(lock, "error: {err}").unwrap();
+                }
+            }
+            stdout().flush()?;
+        }
+        println!("{}", "\n");
+    }
 
     Ok(())
 }
 
 fn get_current_weather(location: &str, unit: &str) -> serde_json::Value {
-  let weather_info = json!({
-    "location": location,
-    "temperature": "72",
-    "unit": unit,
-    "forecast": ["sunny", "windy"]
-  });
+    let weather_info = json!({
+        "location": location,
+        "temperature": "72",
+        "unit": unit,
+        "forecast": ["sunny", "windy"]
+    });
 
-  weather_info
+    weather_info
 }
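
A note on the `// why doesn't the model return a unit argument?` comment carried through this diff: the model generally only fills in arguments that the user's prompt implies, so an optional parameter like `unit` is simply absent from the generated JSON. Below is a minimal sketch of a more forgiving parse, assuming only the `serde_json` crate the example already depends on; `parse_weather_args` is a hypothetical helper, not part of this patch or of async-openai.

```rust
use serde_json::Value;

// Hypothetical helper (not part of the patch): parse the model-generated
// arguments string, treating `unit` as optional instead of hardcoding it
// at the call site.
fn parse_weather_args(raw: &str) -> Result<(String, String), serde_json::Error> {
    let args: Value = serde_json::from_str(raw)?;
    // Missing keys index as Value::Null, so as_str() returns None.
    let location = args["location"].as_str().unwrap_or_default().to_string();
    // Fall back to fahrenheit only when the model omitted `unit`.
    let unit = args["unit"].as_str().unwrap_or("fahrenheit").to_string();
    Ok((location, unit))
}

fn main() -> Result<(), serde_json::Error> {
    // Typical arguments for "What's the weather like in Boston?" -- no unit.
    let raw = r#"{"location": "Boston, MA"}"#;
    let (location, unit) = parse_weather_args(raw)?;
    assert_eq!(unit, "fahrenheit");
    println!("weather for {location} in {unit}");
    Ok(())
}
```

Dropped into the example, this would replace both the hardcoded `let unit = "fahrenheit";` and the bare `unwrap()` on `location`, so malformed arguments would surface as a `serde_json::Error` instead of a panic.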