From 408e54977fe7613172b50a50c4cc0148de34abbb Mon Sep 17 00:00:00 2001
From: Roger Zurawicki
Date: Mon, 27 Feb 2023 08:40:01 -0500
Subject: [PATCH 1/3] Optimize context of git hunks

---
 src/git.rs | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/git.rs b/src/git.rs
index afb87fe..1dc276c 100644
--- a/src/git.rs
+++ b/src/git.rs
@@ -11,7 +11,16 @@ use crate::cmd;
 use anyhow::{bail, Result};
 
 pub(crate) fn get_diffs() -> Result<String> {
-    let output = cmd::run_command("git", &["diff", "--staged", "-w"])?;
+    let output = cmd::run_command(
+        "git",
+        &[
+            "diff",
+            "--staged",
+            "--ignore-all-space",
+            "--diff-algorithm=minimal",
+            "--function-context",
+        ],
+    )?;
 
     Ok(output)
 }

From c7c207d4440f646bc3801ca298d568fae68d1059 Mon Sep 17 00:00:00 2001
From: Roger Zurawicki
Date: Wed, 1 Mar 2023 14:11:10 -0500
Subject: [PATCH 2/3] Lower OpenAI prompt token limits

- Lower the prompt token limit for "text-davinci-003" and "text-davinci-002" to `4000`
- Lower the prompt token limit for "code-davinci-002" to `4000`
- Lower the prompt token limit for all other models to `4096`
---
 src/llms/openai.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/llms/openai.rs b/src/llms/openai.rs
index 9623cc9..b50d9bb 100644
--- a/src/llms/openai.rs
+++ b/src/llms/openai.rs
@@ -40,13 +40,14 @@ impl OpenAIClient {
 
     pub(crate) fn get_prompt_token_limit_for_model(&self) -> usize {
         match self.model.as_str() {
-            "text-davinci-003" => 4097,
+            "text-davinci-003" => 4000,
+            "text-davinci-002" => 4000,
             "text-curie-001" => 2048,
             "text-babbage-001" => 2048,
             "text-ada-001" => 2048,
-            "code-davinci-002" => 8000,
+            "code-davinci-002" => 4000,
             "code-cushman-001" => 2048,
-            _ => 4097,
+            _ => 4096,
         }
     }
 }

From f6d0bca7b81f7451ed5e528cf9ab288b27d254b6 Mon Sep 17 00:00:00 2001
From: Roger Zurawicki
Date: Wed, 1 Mar 2023 14:35:24 -0500
Subject: [PATCH 3/3] Better error messages

---
 src/llms/openai.rs | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/src/llms/openai.rs b/src/llms/openai.rs
index b50d9bb..cda7c21 100644
--- a/src/llms/openai.rs
+++ b/src/llms/openai.rs
@@ -3,6 +3,7 @@ use std::time::Duration;
 
 use anyhow::{anyhow, bail, Result};
 use async_trait::async_trait;
+use colored::Colorize;
 use reqwest::{Client, ClientBuilder};
 use serde_json::{json, Value};
 use tiktoken_rs::tiktoken::{p50k_base, CoreBPE};
@@ -102,7 +103,24 @@ impl LlmClient for OpenAIClient {
             })?;
         Ok(json_response["choices"][0]["text"]
             .as_str()
-            .ok_or_else(|| anyhow!("Unexpected JSON response:\n{}", json_response))?
+            .ok_or_else(|| {
+                let error_message: &str = json_response
+                    .get("error")
+                    .and_then(|e| e.get("message"))
+                    .and_then(|m| m.as_str())
+                    .unwrap_or_default();
+                if !error_message.is_empty() {
+                    return anyhow!(
+                        "{}",
+                        format!("OpenAI error: {error_message}").bold().yellow()
+                    );
+                }
+
+                anyhow!(
+                    "Unexpected API response:\n{}",
+                    json_response.to_string().yellow()
+                )
+            })?
             .trim()
            .to_string())
    }
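
The flags in PATCH 1/3 can be tried outside the crate. Below is a minimal
standalone sketch (a hypothetical harness, not part of the series; it shells
out directly instead of going through the crate's cmd::run_command wrapper)
that runs the same staged diff:

    // Run the staged diff exactly as PATCH 1/3 configures it.
    // `--ignore-all-space` is the long form of the old `-w` flag;
    // `--diff-algorithm=minimal` keeps hunks as small as possible, and
    // `--function-context` expands each hunk to the whole enclosing function.
    use std::process::Command;

    fn main() -> std::io::Result<()> {
        let output = Command::new("git")
            .args([
                "diff",
                "--staged",
                "--ignore-all-space",
                "--diff-algorithm=minimal",
                "--function-context",
            ])
            .output()?;
        print!("{}", String::from_utf8_lossy(&output.stdout));
        Ok(())
    }

The extra function context is presumably the point of the change: the model
generating the commit message sees whole functions rather than bare
three-line hunks.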
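
The fallback added in PATCH 3/3 can likewise be exercised without calling the
API. A minimal sketch (a hypothetical test body, assuming only the anyhow and
serde_json crates and dropping the `colored` styling) that feeds a canned
OpenAI-style error body through the same get/and_then chain:

    use anyhow::anyhow;
    use serde_json::json;

    fn main() {
        // Canned response shaped like an OpenAI error body (hypothetical values).
        let json_response = json!({
            "error": { "message": "This model's maximum context length is 4097 tokens" }
        });

        // Same extraction chain as the patch: "error" -> "message", else "".
        let error_message: &str = json_response
            .get("error")
            .and_then(|e| e.get("message"))
            .and_then(|m| m.as_str())
            .unwrap_or_default();

        let err = if !error_message.is_empty() {
            anyhow!("OpenAI error: {error_message}")
        } else {
            anyhow!("Unexpected API response:\n{}", json_response)
        };

        // Prints the API's own message rather than the raw JSON dump.
        eprintln!("{err}");
    }

Together with the lowered limits in PATCH 2/3, an over-long prompt now fails
with the API's explanatory message instead of an opaque JSON blob.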