From 22ebab8c028c9167db551de9ae9658d6aa1be4d3 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 5 Oct 2025 20:03:20 +0000 Subject: [PATCH 01/10] Initial plan From 692797b8647a95c2839b1aceea16945c266766f3 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 5 Oct 2025 20:23:34 +0000 Subject: [PATCH 02/10] Allow arbitrary model names in configuration with proper OpenAI error handling Co-authored-by: oleander <220827+oleander@users.noreply.github.com> --- src/bin/hook.rs | 4 +- src/model.rs | 46 ++++++++++++++------ tests/llm_input_generation_test.rs | 7 +-- tests/model_custom_test.rs | 68 ++++++++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 19 deletions(-) create mode 100644 tests/model_custom_test.rs diff --git a/src/bin/hook.rs b/src/bin/hook.rs index c04afe60..87f37dcc 100644 --- a/src/bin/hook.rs +++ b/src/bin/hook.rs @@ -117,7 +117,7 @@ impl Args { } let patch = repo - .to_patch(tree, remaining_tokens, model) + .to_patch(tree, remaining_tokens, model.clone()) .context("Failed to get patch")?; let response = commit::generate(patch.to_string(), remaining_tokens, model, None).await?; @@ -190,7 +190,7 @@ impl Args { } let patch = repo - .to_patch(tree, remaining_tokens, model) + .to_patch(tree, remaining_tokens, model.clone()) .context("Failed to get patch")?; let response = commit::generate(patch.to_string(), remaining_tokens, model, None).await?; diff --git a/src/model.rs b/src/model.rs index 344d4d21..069b71a5 100644 --- a/src/model.rs +++ b/src/model.rs @@ -27,7 +27,7 @@ const DEFAULT_MODEL_NAME: &str = "gpt-4.1"; /// Represents the available AI models for commit message generation. /// Each model has different capabilities and token limits. -#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Default)] pub enum Model { /// Standard GPT-4 model GPT4, @@ -37,7 +37,9 @@ pub enum Model { GPT4oMini, /// Default model - GPT-4.1 latest version #[default] - GPT41 + GPT41, + /// Custom model name for any other OpenAI model + Custom(String) } impl Model { @@ -60,8 +62,8 @@ impl Model { // Always use the proper tokenizer for accurate counts // We cannot afford to underestimate tokens as it may cause API failures let tokenizer = TOKENIZER.get_or_init(|| { - let model_str: &str = self.into(); - get_tokenizer(model_str) + let model_str = String::from(self); + get_tokenizer(&model_str) }); // Use direct tokenization for accurate token count @@ -75,8 +77,16 @@ impl Model { /// * `usize` - The maximum number of tokens the model can process pub fn context_size(&self) -> usize { profile!("Get context size"); - let model_str: &str = self.into(); - get_context_size(model_str) + + // For custom models, we don't know the context size, so use a reasonable default + // that works for most modern OpenAI models (the 128k window used by GPT-4-Turbo/GPT-4o-class models) + match self { + Model::Custom(_) => 128000, // Default to the 128k context size for custom models + _ => { + let model_str = String::from(self); + get_context_size(&model_str) + } + } } /// Truncates the given text to fit within the specified token limit.
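As a reading aid, a minimal sketch of how the new `Custom` variant is meant to behave end to end, assuming the `context_size` fallback above and the `FromStr`/`From<&Model> for String` impls in the next hunks; illustrative only, not part of the patch:

```rust
use std::str::FromStr;
use ai::model::Model;

#[test]
fn custom_model_round_trip() {
    // Unknown names parse into the Custom variant instead of erroring.
    let model = Model::from_str("my-fine-tuned-model").unwrap();
    assert_eq!(model, Model::Custom("my-fine-tuned-model".to_string()));

    // Custom models fall back to the 128k default context size.
    assert_eq!(model.context_size(), 128_000);

    // The raw name round-trips back out for the OpenAI request.
    assert_eq!(String::from(&model), "my-fine-tuned-model");
}
```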
@@ -167,17 +177,25 @@ impl Model { } } -impl From<&Model> for &str { +impl From<&Model> for String { fn from(model: &Model) -> Self { match model { - Model::GPT4o => MODEL_GPT4_OPTIMIZED, - Model::GPT4 => MODEL_GPT4, - Model::GPT4oMini => MODEL_GPT4_MINI, - Model::GPT41 => MODEL_GPT4_1 + Model::GPT4o => MODEL_GPT4_OPTIMIZED.to_string(), + Model::GPT4 => MODEL_GPT4.to_string(), + Model::GPT4oMini => MODEL_GPT4_MINI.to_string(), + Model::GPT41 => MODEL_GPT4_1.to_string(), + Model::Custom(name) => name.clone() } } } +// Keep the old impl for backwards compatibility where possible +impl Model { + pub fn as_str(&self) -> String { + self.into() + } +} + impl FromStr for Model { type Err = anyhow::Error; @@ -187,21 +205,21 @@ impl FromStr for Model { fn from_str(s: &str) -> Result<Self, Self::Err> { match s.trim().to_lowercase().as_str() { "gpt-4o" => Ok(Model::GPT4o), "gpt-4" => Ok(Model::GPT4), "gpt-4o-mini" => Ok(Model::GPT4oMini), "gpt-4.1" => Ok(Model::GPT41), - model => bail!("Invalid model name: {}", model) + model => Ok(Model::Custom(model.to_string())) } } } impl Display for Model { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", <&str>::from(self)) + write!(f, "{}", String::from(self)) } } // Implement conversion from string types to Model with fallback to default impl From<&str> for Model { fn from(s: &str) -> Self { - s.parse().unwrap_or_default() + s.parse().unwrap_or_else(|_| Model::Custom(s.to_string())) } } diff --git a/tests/llm_input_generation_test.rs b/tests/llm_input_generation_test.rs index ad08eb51..0a0f1940 100644 --- a/tests/llm_input_generation_test.rs +++ b/tests/llm_input_generation_test.rs @@ -89,8 +89,9 @@ fn test_create_request_preserves_model() { let models = vec![Model::GPT4oMini, Model::GPT4o, Model::GPT4, Model::GPT41]; for model in models { - let result = create_commit_request(diff.clone(), 1000, model); - assert!(result.is_ok(), "Should work with model {:?}", model); + let model_clone = model.clone(); + let result = create_commit_request(diff.clone(), 1000, model.clone()); + assert!(result.is_ok(), "Should work with model {:?}", model_clone); let request = result.unwrap(); assert_eq!(request.model, model, "Should preserve model type"); @@ -549,7 +550,7 @@ index 123abc..456def 100644 let model = Model::GPT4oMini; let template = get_instruction_template().unwrap(); let token_count = token_used(&model).unwrap(); - let request = create_commit_request(simple_diff.clone(), 2000, model).unwrap(); + let request = create_commit_request(simple_diff.clone(), 2000, model.clone()).unwrap(); // Verify all components work together assert!(!template.is_empty(), "Template should be generated"); diff --git a/tests/model_custom_test.rs b/tests/model_custom_test.rs new file mode 100644 index 00000000..c3c42040 --- /dev/null +++ b/tests/model_custom_test.rs @@ -0,0 +1,68 @@ +use ai::model::Model; + +#[test] +fn test_custom_model_creation() { + // Test that custom model names are accepted + let custom_model: Model = "custom-gpt-model".into(); + match custom_model { + Model::Custom(name) => assert_eq!(name, "custom-gpt-model"), + _ => panic!("Should create Custom variant") + } +} + +#[test] +fn test_predefined_model_creation() { + // Test that predefined models still work + let gpt4: Model = "gpt-4".into(); + assert_eq!(gpt4, Model::GPT4); + + let gpt4o: Model = "gpt-4o".into(); + assert_eq!(gpt4o, Model::GPT4o); +} + +#[test] +fn test_model_string_conversion() { + let custom_model = Model::Custom("my-custom-model".to_string()); + let model_str = String::from(&custom_model); + assert_eq!(model_str, "my-custom-model"); + + let gpt4 = Model::GPT4; + let gpt4_str =
String::from(&gpt4); + assert_eq!(gpt4_str, "gpt-4"); +} + +#[test] +fn test_model_display() { + let custom_model = Model::Custom("test-model".to_string()); + assert_eq!(format!("{}", custom_model), "test-model"); +} + +#[test] +fn test_custom_model_context_size() { + let custom_model = Model::Custom("unknown-model".to_string()); + // Should use default context size for unknown models + assert_eq!(custom_model.context_size(), 128000); +} + +#[test] +fn test_custom_model_token_counting() { + let custom_model = Model::Custom("test-model".to_string()); + let result = custom_model.count_tokens("Hello world"); + assert!(result.is_ok(), "Token counting should work for custom models"); + assert!(result.unwrap() > 0, "Should count tokens"); +} + +#[test] +fn test_from_str_parsing() { + // Test parsing various model names + let model1: Result<Model, _> = "gpt-4o".parse(); + assert!(model1.is_ok()); + assert_eq!(model1.unwrap(), Model::GPT4o); + + let model2: Result<Model, _> = "custom-model-name".parse(); + assert!(model2.is_ok()); + match model2.unwrap() { + Model::Custom(name) => assert_eq!(name, "custom-model-name"), + _ => panic!("Should create Custom variant") + } +} \ No newline at end of file From d6c0b1320434183aadfafc43790452cdc1426d6d Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 5 Oct 2025 20:58:18 +0000 Subject: [PATCH 03/10] Replace Custom model variant with specific GPT-4.1, GPT-4.1-mini, GPT-4.1-nano, and GPT-4.5 variants Co-authored-by: oleander <220827+oleander@users.noreply.github.com> --- src/bin/hook.rs | 4 +- src/commit.rs | 4 +- src/model.rs | 51 +++++++++------------- src/openai.rs | 2 +- tests/llm_input_generation_test.rs | 59 +++++++++++++------------- tests/model_custom_test.rs | 68 ------------------------------ tests/model_token_test.rs | 8 ++-- 7 files changed, 58 insertions(+), 138 deletions(-) delete mode 100644 tests/model_custom_test.rs diff --git a/src/bin/hook.rs b/src/bin/hook.rs index 87f37dcc..c04afe60 100644 --- a/src/bin/hook.rs +++ b/src/bin/hook.rs @@ -117,7 +117,7 @@ impl Args { } let patch = repo - .to_patch(tree, remaining_tokens, model.clone()) + .to_patch(tree, remaining_tokens, model) .context("Failed to get patch")?; let response = commit::generate(patch.to_string(), remaining_tokens, model, None).await?; @@ -190,7 +190,7 @@ impl Args { } let patch = repo - .to_patch(tree, remaining_tokens, model.clone()) + .to_patch(tree, remaining_tokens, model) .context("Failed to get patch")?; let response = commit::generate(patch.to_string(), remaining_tokens, model, None).await?; diff --git a/src/commit.rs b/src/commit.rs index 908d37ef..94a8a917 100644 --- a/src/commit.rs +++ b/src/commit.rs @@ -231,7 +231,7 @@ mod tests { let result = generate( "diff --git a/test.txt b/test.txt\n+Hello World".to_string(), 1024, - Model::GPT4oMini, + Model::GPT41Mini, Some(&settings) ) .await; @@ -265,7 +265,7 @@ mod tests { let result = generate( "diff --git a/test.txt b/test.txt\n+Hello World".to_string(), 1024, - Model::GPT4oMini, + Model::GPT41Mini, Some(&settings) ) .await; diff --git a/src/model.rs b/src/model.rs index 069b71a5..4864e8eb 100644 --- a/src/model.rs +++ b/src/model.rs @@ -18,28 +18,26 @@ use crate::config::App as Settings; // Use App as Settings static TOKENIZER: OnceLock<CoreBPE> = OnceLock::new(); // Model identifiers - using screaming case for constants -const MODEL_GPT4: &str = "gpt-4"; -const MODEL_GPT4_OPTIMIZED: &str = "gpt-4o"; -const MODEL_GPT4_MINI: &str = "gpt-4o-mini"; const MODEL_GPT4_1: &str = "gpt-4.1";
+const MODEL_GPT4_1_MINI: &str = "gpt-4.1-mini"; +const MODEL_GPT4_1_NANO: &str = "gpt-4.1-nano"; +const MODEL_GPT4_5: &str = "gpt-4.5"; // TODO: Get this from config.rs or a shared constants module const DEFAULT_MODEL_NAME: &str = "gpt-4.1"; /// Represents the available AI models for commit message generation. /// Each model has different capabilities and token limits. -#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Default)] pub enum Model { - /// Standard GPT-4 model - GPT4, - /// Optimized GPT-4 model for better performance - GPT4o, - /// Mini version of optimized GPT-4 for faster processing - GPT4oMini, /// Default model - GPT-4.1 latest version #[default] GPT41, - /// Custom model name for any other OpenAI model - Custom(String) + /// Mini version of GPT-4.1 for faster processing + GPT41Mini, + /// Nano version of GPT-4.1 for very fast processing + GPT41Nano, + /// GPT-4.5 model for advanced capabilities + GPT45 } impl Model { @@ -77,16 +75,8 @@ impl Model { /// * `usize` - The maximum number of tokens the model can process pub fn context_size(&self) -> usize { profile!("Get context size"); - - // For custom models, we don't know the context size, so use a reasonable default - // that works for most modern OpenAI models (the 128k window used by GPT-4-Turbo/GPT-4o-class models) - match self { - Model::Custom(_) => 128000, // Default to the 128k context size for custom models - _ => { - let model_str = String::from(self); - get_context_size(&model_str) - } - } + let model_str = String::from(self); + get_context_size(&model_str) } /// Truncates the given text to fit within the specified token limit. @@ -180,11 +170,10 @@ impl From<&Model> for String { fn from(model: &Model) -> Self { match model { - Model::GPT4o => MODEL_GPT4_OPTIMIZED.to_string(), - Model::GPT4 => MODEL_GPT4.to_string(), - Model::GPT4oMini => MODEL_GPT4_MINI.to_string(), Model::GPT41 => MODEL_GPT4_1.to_string(), - Model::Custom(name) => name.clone() + Model::GPT41Mini => MODEL_GPT4_1_MINI.to_string(), + Model::GPT41Nano => MODEL_GPT4_1_NANO.to_string(), + Model::GPT45 => MODEL_GPT4_5.to_string() } } } @@ -201,11 +190,11 @@ impl FromStr for Model { fn from_str(s: &str) -> Result<Self, Self::Err> { match s.trim().to_lowercase().as_str() { - "gpt-4o" => Ok(Model::GPT4o), - "gpt-4" => Ok(Model::GPT4), - "gpt-4o-mini" => Ok(Model::GPT4oMini), "gpt-4.1" => Ok(Model::GPT41), - model => Ok(Model::Custom(model.to_string())) + "gpt-4.1-mini" => Ok(Model::GPT41Mini), + "gpt-4.1-nano" => Ok(Model::GPT41Nano), + "gpt-4.5" => Ok(Model::GPT45), + model => bail!("Invalid model name: {}", model) } } } @@ -219,7 +208,7 @@ impl Display for Model { // Implement conversion from string types to Model with fallback to default impl From<&str> for Model { fn from(s: &str) -> Self { - s.parse().unwrap_or_else(|_| Model::Custom(s.to_string())) + s.parse().unwrap_or_default() } } diff --git a/src/openai.rs b/src/openai.rs index 53cfac13..9528dab4 100644 --- a/src/openai.rs +++ b/src/openai.rs @@ -37,7 +37,7 @@ pub async fn generate_commit_message(diff: &str) -> Result<String> { if let Ok(api_key) = std::env::var("OPENAI_API_KEY") { if !api_key.is_empty() { // Use the commit function directly without parsing - match commit::generate(diff.to_string(), 256, Model::GPT4oMini, None).await { + match commit::generate(diff.to_string(), 256, Model::GPT41Mini, None).await { Ok(response) => return Ok(response.response.trim().to_string()), Err(e) => { log::warn!("Direct generation failed, falling
back to local: {e}"); diff --git a/tests/llm_input_generation_test.rs b/tests/llm_input_generation_test.rs index 0a0f1940..d1c2f117 100644 --- a/tests/llm_input_generation_test.rs +++ b/tests/llm_input_generation_test.rs @@ -28,7 +28,7 @@ fn test_template_generation_with_default_max_length() { #[test] fn test_token_counting_empty_template() { // Token counting should work even with minimal content - let model = Model::GPT4oMini; + let model = Model::GPT41Mini; let result = model.count_tokens(""); assert!(result.is_ok(), "Should handle empty string"); assert_eq!(result.unwrap(), 0, "Empty string should have 0 tokens"); @@ -37,7 +37,7 @@ fn test_token_counting_empty_template() { #[test] fn test_token_counting_template() { // Test that we can count tokens in the actual template - let model = Model::GPT4oMini; + let model = Model::GPT41Mini; let result = token_used(&model); assert!(result.is_ok(), "Token counting should succeed"); @@ -52,7 +52,7 @@ fn test_token_counting_template() { fn test_create_request_with_zero_tokens() { // Edge case: what happens with 0 max_tokens? let diff = "diff --git a/test.txt b/test.txt\n+Hello World".to_string(); - let result = create_commit_request(diff, 0, Model::GPT4oMini); + let result = create_commit_request(diff, 0, Model::GPT41Mini); assert!(result.is_ok(), "Should create request even with 0 tokens"); let request = result.unwrap(); @@ -63,7 +63,7 @@ fn test_create_request_with_zero_tokens() { fn test_create_request_with_empty_diff() { // Corner case: empty diff let diff = "".to_string(); - let result = create_commit_request(diff.clone(), 1000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle empty diff"); let request = result.unwrap(); @@ -75,7 +75,7 @@ fn test_create_request_with_empty_diff() { fn test_create_request_with_whitespace_only_diff() { // Corner case: whitespace-only diff let diff = " \n\t\n ".to_string(); - let result = create_commit_request(diff.clone(), 1000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle whitespace-only diff"); let request = result.unwrap(); @@ -86,12 +86,11 @@ fn test_create_request_with_whitespace_only_diff() { fn test_create_request_preserves_model() { // Test that different models are preserved correctly let diff = "diff --git a/test.txt b/test.txt\n+Test".to_string(); - let models = vec![Model::GPT4oMini, Model::GPT4o, Model::GPT4, Model::GPT41]; + let models = vec![Model::GPT41Mini, Model::GPT45, Model::GPT41, Model::GPT41Nano]; for model in models { - let model_clone = model.clone(); - let result = create_commit_request(diff.clone(), 1000, model.clone()); - assert!(result.is_ok(), "Should work with model {:?}", model_clone); + let result = create_commit_request(diff.clone(), 1000, model); + assert!(result.is_ok(), "Should work with model {:?}", model); let request = result.unwrap(); assert_eq!(request.model, model, "Should preserve model type"); @@ -104,7 +103,7 @@ fn test_create_request_with_max_u16_tokens() { let diff = "diff --git a/test.txt b/test.txt\n+Test".to_string(); let max_tokens = usize::from(u16::MAX); - let result = create_commit_request(diff, max_tokens, Model::GPT4oMini); + let result = create_commit_request(diff, max_tokens, Model::GPT41Mini); assert!(result.is_ok(), "Should handle max u16 tokens"); let request = result.unwrap(); @@ -117,7 +116,7 @@ fn test_create_request_with_overflow_tokens() { let diff = "diff --git a/test.txt 
b/test.txt\n+Test".to_string(); let max_tokens = usize::from(u16::MAX) + 1000; - let result = create_commit_request(diff, max_tokens, Model::GPT4oMini); + let result = create_commit_request(diff, max_tokens, Model::GPT41Mini); assert!(result.is_ok(), "Should handle token overflow"); let request = result.unwrap(); @@ -140,7 +139,7 @@ index 1234567..abcdefg 100644 "# .to_string(); - let result = create_commit_request(diff.clone(), 1000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle simple diff"); let request = result.unwrap(); @@ -165,7 +164,7 @@ index 0000000..1234567 "# .to_string(); - let result = create_commit_request(diff.clone(), 2000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 2000, Model::GPT45); assert!(result.is_ok(), "Should handle file addition"); let request = result.unwrap(); @@ -187,7 +186,7 @@ index 1234567..0000000 "# .to_string(); - let result = create_commit_request(diff.clone(), 1500, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1500, Model::GPT41Mini); assert!(result.is_ok(), "Should handle file deletion"); let request = result.unwrap(); @@ -209,7 +208,7 @@ index 1234567..abcdefg 100644 "# .to_string(); - let result = create_commit_request(diff.clone(), 1000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle file rename"); let request = result.unwrap(); @@ -219,7 +218,7 @@ index 1234567..abcdefg 100644 #[test] fn test_token_counting_with_diff_content() { - let model = Model::GPT4oMini; + let model = Model::GPT41Mini; let small_diff = "diff --git a/a.txt b/a.txt\n+Hello"; let medium_diff = r#"diff --git a/test.js b/test.js @@ -276,7 +275,7 @@ index 0000000..5555555 "# .to_string(); - let result = create_commit_request(diff.clone(), 3000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 3000, Model::GPT45); assert!(result.is_ok(), "Should handle multiple file changes"); let request = result.unwrap(); @@ -293,7 +292,7 @@ Binary files a/image.png and b/image.png differ "# .to_string(); - let result = create_commit_request(diff.clone(), 1000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 1000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle binary file diff"); let request = result.unwrap(); @@ -314,7 +313,7 @@ index 1234567..abcdefg 100644 "# .to_string(); - let result = create_commit_request(diff.clone(), 2000, Model::GPT4oMini); + let result = create_commit_request(diff.clone(), 2000, Model::GPT41Mini); assert!(result.is_ok(), "Should handle special characters"); let request = result.unwrap(); @@ -336,14 +335,14 @@ fn test_create_request_with_large_diff() { } } - let result = create_commit_request(diff.clone(), 8000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 8000, Model::GPT45); assert!(result.is_ok(), "Should handle large diff"); let request = result.unwrap(); assert!(request.prompt.len() > 10000, "Large diff should be preserved"); // Count tokens to ensure we can handle large inputs - let model = Model::GPT4o; + let model = Model::GPT45; let token_count = model.count_tokens(&diff).unwrap(); assert!(token_count > 1000, "Large diff should have substantial token count"); } @@ -357,7 +356,7 @@ fn test_create_request_with_very_long_lines() { long_line ); - let result = create_commit_request(diff.clone(), 5000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 5000, 
Model::GPT45); assert!(result.is_ok(), "Should handle very long lines"); let request = result.unwrap(); @@ -401,7 +400,7 @@ Binary files a/image.png and b/image.png differ "# .to_string(); - let result = create_commit_request(diff.clone(), 4000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 4000, Model::GPT45); assert!(result.is_ok(), "Should handle mixed operations"); let request = result.unwrap(); @@ -416,7 +415,7 @@ Binary files a/image.png and b/image.png differ #[test] fn test_token_counting_consistency_with_complex_diff() { - let model = Model::GPT4oMini; + let model = Model::GPT41Mini; let complex_diff = r#"diff --git a/src/main.rs b/src/main.rs index abc123..def456 100644 @@ -487,7 +486,7 @@ index 777..888 100644 "# .to_string(); - let result = create_commit_request(diff.clone(), 5000, Model::GPT4o); + let result = create_commit_request(diff.clone(), 5000, Model::GPT45); assert!(result.is_ok(), "Should handle multiple programming languages"); let request = result.unwrap(); @@ -514,13 +513,13 @@ fn test_template_contains_required_sections() { #[test] fn test_request_structure_completeness() { let diff = "diff --git a/test.txt b/test.txt\n+test".to_string(); - let request = create_commit_request(diff.clone(), 1000, Model::GPT4oMini).unwrap(); + let request = create_commit_request(diff.clone(), 1000, Model::GPT41Mini).unwrap(); // Verify request has all required components assert!(!request.system.is_empty(), "System prompt should not be empty"); assert_eq!(request.prompt, diff, "User prompt should match input diff"); assert_eq!(request.max_tokens, 1000, "Max tokens should be set correctly"); - assert_eq!(request.model, Model::GPT4oMini, "Model should be set correctly"); + assert_eq!(request.model, Model::GPT41Mini, "Model should be set correctly"); // Verify system prompt has reasonable length assert!(request.system.len() > 500, "System prompt should be substantial"); @@ -547,10 +546,10 @@ index 123abc..456def 100644 .to_string(); // Test the full workflow - let model = Model::GPT4oMini; + let model = Model::GPT41Mini; let template = get_instruction_template().unwrap(); let token_count = token_used(&model).unwrap(); - let request = create_commit_request(simple_diff.clone(), 2000, model.clone()).unwrap(); + let request = create_commit_request(simple_diff.clone(), 2000, model).unwrap(); // Verify all components work together assert!(!template.is_empty(), "Template should be generated"); @@ -563,7 +562,7 @@ index 123abc..456def 100644 #[test] fn test_end_to_end_with_token_limits() { // Test that we can calculate tokens for both template and diff - let model = Model::GPT4o; + let model = Model::GPT45; let diff = r#"diff --git a/src/main.rs b/src/main.rs index abc..def 100644 --- a/src/main.rs diff --git a/tests/model_custom_test.rs b/tests/model_custom_test.rs deleted file mode 100644 index c3c42040..00000000 --- a/tests/model_custom_test.rs +++ /dev/null @@ -1,68 +0,0 @@ -use ai::model::Model; - -#[test] -fn test_custom_model_creation() { - // Test that custom model names are accepted - let custom_model: Model = "custom-gpt-model".into(); - match custom_model { - Model::Custom(name) => assert_eq!(name, "custom-gpt-model"), - _ => panic!("Should create Custom variant") - } -} - -#[test] -fn test_predefined_model_creation() { - // Test that predefined models still work - let gpt4: Model = "gpt-4".into(); - assert_eq!(gpt4, Model::GPT4); - - let gpt4o: Model = "gpt-4o".into(); - assert_eq!(gpt4o, Model::GPT4o); -} - -#[test] -fn test_model_string_conversion() { - let 
custom_model = Model::Custom("my-custom-model".to_string()); - let model_str = String::from(&custom_model); - assert_eq!(model_str, "my-custom-model"); - - let gpt4 = Model::GPT4; - let gpt4_str = String::from(&gpt4); - assert_eq!(gpt4_str, "gpt-4"); -} - -#[test] -fn test_model_display() { - let custom_model = Model::Custom("test-model".to_string()); - assert_eq!(format!("{}", custom_model), "test-model"); -} - -#[test] -fn test_custom_model_context_size() { - let custom_model = Model::Custom("unknown-model".to_string()); - // Should use default context size for unknown models - assert_eq!(custom_model.context_size(), 128000); -} - -#[test] -fn test_custom_model_token_counting() { - let custom_model = Model::Custom("test-model".to_string()); - let result = custom_model.count_tokens("Hello world"); - assert!(result.is_ok(), "Token counting should work for custom models"); - assert!(result.unwrap() > 0, "Should count tokens"); -} - -#[test] -fn test_from_str_parsing() { - // Test parsing various model names - let model1: Result<Model, _> = "gpt-4o".parse(); - assert!(model1.is_ok()); - assert_eq!(model1.unwrap(), Model::GPT4o); - - let model2: Result<Model, _> = "custom-model-name".parse(); - assert!(model2.is_ok()); - match model2.unwrap() { - Model::Custom(name) => assert_eq!(name, "custom-model-name"), - _ => panic!("Should create Custom variant") - } -} \ No newline at end of file diff --git a/tests/model_token_test.rs b/tests/model_token_test.rs index e3ca955e..3dc1ddae 100644 --- a/tests/model_token_test.rs +++ b/tests/model_token_test.rs @@ -2,7 +2,7 @@ use ai::model::Model; #[test] fn test_token_counting_accuracy() { - let model = Model::GPT4; + let model = Model::GPT41; // Test various text lengths to ensure we're not underestimating let test_cases = vec![ @@ -46,7 +46,7 @@ fn test_token_counting_accuracy() { #[test] fn test_no_underestimation_for_context_limit() { - let model = Model::GPT4; + let model = Model::GPT41; // Create text that would be underestimated by the old heuristics // Old heuristic: ~4 chars per token, but actual can be much different @@ -79,7 +79,7 @@ fn test_no_underestimation_for_context_limit() { #[test] fn test_token_counting_consistency() { - let model = Model::GPT4; + let model = Model::GPT41; // Test that the same text always returns the same token count let test_text = "The quick brown fox jumps over the lazy dog. This is a test sentence with various words."; @@ -95,7 +95,7 @@ fn test_token_counting_consistency() { #[test] fn test_long_text_token_counting() { - let model = Model::GPT4; + let model = Model::GPT41; // Test with a longer text to ensure we're using the tokenizer properly let long_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.
".repeat(50); From c47ceef2108a39c4e97b09a0feddc086e9347c43 Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:11:26 +0200 Subject: [PATCH 04/10] Implement AsRef for Model to improve string handling --- src/model.rs | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/src/model.rs b/src/model.rs index 4864e8eb..6924d385 100644 --- a/src/model.rs +++ b/src/model.rs @@ -60,8 +60,7 @@ impl Model { // Always use the proper tokenizer for accurate counts // We cannot afford to underestimate tokens as it may cause API failures let tokenizer = TOKENIZER.get_or_init(|| { - let model_str = String::from(self); - get_tokenizer(&model_str) + get_tokenizer(self.as_ref()) }); // Use direct tokenization for accurate token count @@ -75,8 +74,7 @@ impl Model { /// * `usize` - The maximum number of tokens the model can process pub fn context_size(&self) -> usize { profile!("Get context size"); - let model_str = String::from(self); - get_context_size(&model_str) + get_context_size(self.as_ref()) } /// Truncates the given text to fit within the specified token limit. @@ -167,21 +165,28 @@ impl Model { } } +impl AsRef for Model { + fn as_ref(&self) -> &str { + match self { + Model::GPT41 => MODEL_GPT4_1, + Model::GPT41Mini => MODEL_GPT4_1_MINI, + Model::GPT41Nano => MODEL_GPT4_1_NANO, + Model::GPT45 => MODEL_GPT4_5 + } + } +} + +// Keep conversion to String for cases that need owned strings impl From<&Model> for String { fn from(model: &Model) -> Self { - match model { - Model::GPT41 => MODEL_GPT4_1.to_string(), - Model::GPT41Mini => MODEL_GPT4_1_MINI.to_string(), - Model::GPT41Nano => MODEL_GPT4_1_NANO.to_string(), - Model::GPT45 => MODEL_GPT4_5.to_string() - } + model.as_ref().to_string() } } // Keep the old impl for backwards compatibility where possible impl Model { - pub fn as_str(&self) -> String { - self.into() + pub fn as_str(&self) -> &str { + self.as_ref() } } @@ -201,7 +206,7 @@ impl FromStr for Model { impl Display for Model { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", String::from(self)) + write!(f, "{}", self.as_ref()) } } From c4a5a516e389fa83e41fcbbe82d1831c043ed346 Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:11:49 +0200 Subject: [PATCH 05/10] Optimize string allocations by implementing AsRef for Model - Implement AsRef for Model to avoid unnecessary String allocations - Update count_tokens() and context_size() to use as_ref() directly - Change From<&Model> for String to use as_ref().to_string() - Update Display impl and as_str() method to return &str instead of String - Eliminates multiple unnecessary heap allocations per call Resolves review comments about inefficient string conversions. 
--- src/model.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/model.rs b/src/model.rs index 6924d385..1246c7f2 100644 --- a/src/model.rs +++ b/src/model.rs @@ -59,9 +59,7 @@ impl Model { // Always use the proper tokenizer for accurate counts // We cannot afford to underestimate tokens as it may cause API failures - let tokenizer = TOKENIZER.get_or_init(|| { - get_tokenizer(self.as_ref()) - }); + let tokenizer = TOKENIZER.get_or_init(|| get_tokenizer(self.as_ref())); // Use direct tokenization for accurate token count let tokens = tokenizer.encode_ordinary(text); From 04831dc21141795720e3729afe03357b7a4eae01 Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:17:25 +0200 Subject: [PATCH 06/10] Update default model and add backward compatibility for deprecated model names --- src/config.rs | 2 +- src/model.rs | 33 ++++++++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/src/config.rs b/src/config.rs index bb5fd09e..5f18b355 100644 --- a/src/config.rs +++ b/src/config.rs @@ -12,7 +12,7 @@ use console::Emoji; const DEFAULT_TIMEOUT: i64 = 30; const DEFAULT_MAX_COMMIT_LENGTH: i64 = 72; const DEFAULT_MAX_TOKENS: i64 = 2024; -const DEFAULT_MODEL: &str = "gpt-4o-mini"; +const DEFAULT_MODEL: &str = "gpt-4.1"; // Matches Model::default() const DEFAULT_API_KEY: &str = ""; #[derive(Debug, Default, Deserialize, PartialEq, Eq, Serialize)] diff --git a/src/model.rs b/src/model.rs index 1246c7f2..90f0db22 100644 --- a/src/model.rs +++ b/src/model.rs @@ -192,12 +192,33 @@ impl FromStr for Model { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { - match s.trim().to_lowercase().as_str() { + let normalized = s.trim().to_lowercase(); + match normalized.as_str() { "gpt-4.1" => Ok(Model::GPT41), "gpt-4.1-mini" => Ok(Model::GPT41Mini), "gpt-4.1-nano" => Ok(Model::GPT41Nano), "gpt-4.5" => Ok(Model::GPT45), - model => bail!("Invalid model name: {}", model) + // Backward compatibility for deprecated models - map to closest GPT-4.1 equivalent + "gpt-4" | "gpt-4o" => { + log::warn!( + "Model '{}' is deprecated. Mapping to 'gpt-4.1'. \ + Please update your configuration with: git ai config set model gpt-4.1", + s + ); + Ok(Model::GPT41) + } + "gpt-4o-mini" | "gpt-3.5-turbo" => { + log::warn!( + "Model '{}' is deprecated. Mapping to 'gpt-4.1-mini'. \ + Please update your configuration with: git ai config set model gpt-4.1-mini", + s + ); + Ok(Model::GPT41Mini) + } + model => bail!( + "Invalid model name: '{}'. Supported models: gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.5", + model + ) } } } @@ -211,7 +232,13 @@ impl Display for Model { // Implement conversion from string types to Model with fallback to default impl From<&str> for Model { fn from(s: &str) -> Self { - s.parse().unwrap_or_default() + s.parse().unwrap_or_else(|e| { + log::error!( + "Failed to parse model '{}': {}. Falling back to default model 'gpt-4.1'.", + s, e + ); + Model::default() + }) } } From 59cb09dfb8d73a7b02a3e5b5ced98fd59c986b7e Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:17:52 +0200 Subject: [PATCH 07/10] Add comprehensive tests for model name parsing.
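One invariant worth pinning down alongside these tests: the config-level default string and `Model::default()` must agree, or a fresh config would silently resolve to a different model. A sketch (the inlined `DEFAULT_MODEL` mirrors src/config.rs and is repeated here only for illustration):

```rust
use std::str::FromStr;
use ai::model::Model;

#[test]
fn config_default_stays_in_sync() {
    // Mirrors DEFAULT_MODEL in src/config.rs.
    const DEFAULT_MODEL: &str = "gpt-4.1";
    assert_eq!(Model::from_str(DEFAULT_MODEL).unwrap(), Model::default());
    assert_eq!(Model::default().as_str(), DEFAULT_MODEL);
}
```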
--- tests/model_validation_test.rs | 101 +++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 tests/model_validation_test.rs diff --git a/tests/model_validation_test.rs b/tests/model_validation_test.rs new file mode 100644 index 00000000..c1ed29b7 --- /dev/null +++ b/tests/model_validation_test.rs @@ -0,0 +1,101 @@ +use ai::model::Model; +use std::str::FromStr; + +#[test] +fn test_valid_model_names() { + // Test all supported model names + assert_eq!(Model::from_str("gpt-4.1").unwrap(), Model::GPT41); + assert_eq!(Model::from_str("gpt-4.1-mini").unwrap(), Model::GPT41Mini); + assert_eq!(Model::from_str("gpt-4.1-nano").unwrap(), Model::GPT41Nano); + assert_eq!(Model::from_str("gpt-4.5").unwrap(), Model::GPT45); +} + +#[test] +fn test_case_insensitive_parsing() { + // Test that model names are case-insensitive + assert_eq!(Model::from_str("GPT-4.1").unwrap(), Model::GPT41); + assert_eq!(Model::from_str("Gpt-4.1-Mini").unwrap(), Model::GPT41Mini); + assert_eq!(Model::from_str("GPT-4.1-NANO").unwrap(), Model::GPT41Nano); + assert_eq!(Model::from_str("gPt-4.5").unwrap(), Model::GPT45); +} + +#[test] +fn test_whitespace_handling() { + // Test that leading/trailing whitespace is trimmed + assert_eq!(Model::from_str(" gpt-4.1 ").unwrap(), Model::GPT41); + assert_eq!(Model::from_str("\tgpt-4.1-mini\n").unwrap(), Model::GPT41Mini); +} + +#[test] +fn test_deprecated_model_backward_compat() { + // Test that deprecated models map to their GPT-4.1 equivalents + // These should succeed but log warnings + assert_eq!(Model::from_str("gpt-4").unwrap(), Model::GPT41); + assert_eq!(Model::from_str("gpt-4o").unwrap(), Model::GPT41); + assert_eq!(Model::from_str("gpt-4o-mini").unwrap(), Model::GPT41Mini); + assert_eq!(Model::from_str("gpt-3.5-turbo").unwrap(), Model::GPT41Mini); +} + +#[test] +fn test_invalid_model_name() { + // Test that invalid model names return an error + let result = Model::from_str("does-not-exist"); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Invalid model name")); +} + +#[test] +fn test_invalid_model_fallback() { + // Test that From<&str> falls back to default for invalid models + let model = Model::from("invalid-model"); + assert_eq!(model, Model::default()); + assert_eq!(model, Model::GPT41); +} + +#[test] +fn test_model_display() { + // Test that models display correctly + assert_eq!(Model::GPT41.to_string(), "gpt-4.1"); + assert_eq!(Model::GPT41Mini.to_string(), "gpt-4.1-mini"); + assert_eq!(Model::GPT41Nano.to_string(), "gpt-4.1-nano"); + assert_eq!(Model::GPT45.to_string(), "gpt-4.5"); +} + +#[test] +fn test_model_as_str() { + // Test the as_str() method + assert_eq!(Model::GPT41.as_str(), "gpt-4.1"); + assert_eq!(Model::GPT41Mini.as_str(), "gpt-4.1-mini"); + assert_eq!(Model::GPT41Nano.as_str(), "gpt-4.1-nano"); + assert_eq!(Model::GPT45.as_str(), "gpt-4.5"); +} + +#[test] +fn test_model_as_ref() { + // Test the AsRef<str> implementation + fn takes_str_ref<S: AsRef<str>>(s: S) -> String { + s.as_ref().to_string() + } + + assert_eq!(takes_str_ref(&Model::GPT41), "gpt-4.1"); + assert_eq!(takes_str_ref(&Model::GPT41Mini), "gpt-4.1-mini"); +} + +#[test] +fn test_model_from_string() { + // Test conversion from String + let s = String::from("gpt-4.1"); + assert_eq!(Model::from(s), Model::GPT41); + + let s = String::from("gpt-4.1-mini"); + assert_eq!(Model::from(s), Model::GPT41Mini); +} + +#[test] +fn test_default_model() { + // Test that the default model is GPT41 + assert_eq!(Model::default(), Model::GPT41); +} From
c6a862130716f8f865af23db1ffc110124a98799 Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:19:02 +0200 Subject: [PATCH 08/10] Add documentation for GPT-4.1 model support and deprecation notices for older models --- MIGRATION.md | 135 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 MIGRATION.md diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 00000000..e7468d5f --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,135 @@ +# Migration Guide + +## Model Changes (v1.0.9+) + +### Overview +Git AI now supports GPT-4.1 family models exclusively. The previous models (GPT-4, GPT-4o, GPT-4o-mini) are deprecated but will continue to work with automatic mapping to equivalent GPT-4.1 models. + +### Supported Models + +| Model Name | Description | Use Case | +|------------|-------------|----------| +| `gpt-4.1` | Default model | General purpose, best quality (default) | +| `gpt-4.1-mini` | Faster processing | Balanced speed and quality | +| `gpt-4.1-nano` | Ultra-fast | Maximum speed | +| `gpt-4.5` | Advanced model | Complex commits requiring more context | + +### Deprecated Models (Backward Compatible) + +The following models are deprecated but will automatically map to GPT-4.1 equivalents: + +| Deprecated Model | Maps To | Warning Level | +|------------------|---------|---------------| +| `gpt-4` | `gpt-4.1` | Deprecation warning logged | +| `gpt-4o` | `gpt-4.1` | Deprecation warning logged | +| `gpt-4o-mini` | `gpt-4.1-mini` | Deprecation warning logged | +| `gpt-3.5-turbo` | `gpt-4.1-mini` | Deprecation warning logged | + +### Migration Steps + +#### 1. Check Your Current Model + +```bash +cat ~/.config/git-ai/config.ini | grep model +``` + +#### 2. Update to a Supported Model + +If you're using a deprecated model, update your configuration: + +```bash +# For best quality (default) +git ai config set model gpt-4.1 + +# For balanced speed and quality +git ai config set model gpt-4.1-mini + +# For maximum speed +git ai config set model gpt-4.1-nano + +# For complex commits +git ai config set model gpt-4.5 +``` + +### Behavior Changes + +#### Before +```bash +$ git ai config set model gpt-4o +✅ Model set to: gpt-4o +$ git commit --no-edit +# Uses gpt-4o directly +``` + +#### After +```bash +$ git ai config set model gpt-4o +✅ Model set to: gpt-4o +$ git commit --no-edit +⚠️ WARN: Model 'gpt-4o' is deprecated. Mapping to 'gpt-4.1'. + Please update your configuration with: git ai config set model gpt-4.1 +# Commit proceeds using gpt-4.1 +``` + +### Invalid Model Names + +If you configure an invalid model name: + +1. **Configuration**: Accepts any string (for forward compatibility) +2. **Runtime**: Logs an error and falls back to `gpt-4.1` (default) +3. **OpenAI API Error**: If OpenAI doesn't recognize the model, the error is logged before fallback + +```bash +$ git ai config set model does-not-exist +✅ Model set to: does-not-exist + +$ git commit --no-edit +❌ ERROR: Failed to parse model 'does-not-exist': Invalid model name: 'does-not-exist'. + Falling back to default model 'gpt-4.1'. 
+# Commit proceeds using gpt-4.1 +``` + +### Testing Your Configuration + +Test your model configuration without making a commit: + +```bash +# Set test mode +export RUST_LOG=debug + +# Make a dummy change +echo "test" >> test.txt +git add test.txt + +# Try committing (you can abort if needed) +git commit --no-edit + +# Check the logs for model information +# Should show: "Using model: gpt-4.1, Tokens: X, ..." +``` + +### Rollback + +If you need to rollback to the previous version: + +```bash +# Uninstall current version +cargo uninstall git-ai + +# Install specific older version (before model changes) +cargo install git-ai --version 1.0.8 +``` + +### Questions? + +- **Q: Will my old model configuration stop working?** + A: No, deprecated models automatically map to GPT-4.1 equivalents with a warning. + +- **Q: Why the change to GPT-4.1 models?** + A: GPT-4.1 models offer better performance, lower latency, and reduced costs while matching or exceeding the quality of previous models. + +- **Q: What if I want to use a model not in the list?** + A: Currently only the 4 GPT-4.1 family models are supported. Custom models will fall back to the default with an error logged. + +- **Q: Can I still use GPT-4o?** + A: Yes, but it will automatically map to GPT-4.1. Update your config to avoid deprecation warnings. From 02818420bbfb971bace4743c62027b542e2aecbc Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:21:04 +0200 Subject: [PATCH 09/10] Delete MIGRATION.md --- MIGRATION.md | 135 --------------------------------------------------- 1 file changed, 135 deletions(-) delete mode 100644 MIGRATION.md diff --git a/MIGRATION.md b/MIGRATION.md deleted file mode 100644 index e7468d5f..00000000 --- a/MIGRATION.md +++ /dev/null @@ -1,135 +0,0 @@ -# Migration Guide - -## Model Changes (v1.0.9+) - -### Overview -Git AI now supports GPT-4.1 family models exclusively. The previous models (GPT-4, GPT-4o, GPT-4o-mini) are deprecated but will continue to work with automatic mapping to equivalent GPT-4.1 models. - -### Supported Models - -| Model Name | Description | Use Case | -|------------|-------------|----------| -| `gpt-4.1` | Default model | General purpose, best quality (default) | -| `gpt-4.1-mini` | Faster processing | Balanced speed and quality | -| `gpt-4.1-nano` | Ultra-fast | Maximum speed | -| `gpt-4.5` | Advanced model | Complex commits requiring more context | - -### Deprecated Models (Backward Compatible) - -The following models are deprecated but will automatically map to GPT-4.1 equivalents: - -| Deprecated Model | Maps To | Warning Level | -|------------------|---------|---------------| -| `gpt-4` | `gpt-4.1` | Deprecation warning logged | -| `gpt-4o` | `gpt-4.1` | Deprecation warning logged | -| `gpt-4o-mini` | `gpt-4.1-mini` | Deprecation warning logged | -| `gpt-3.5-turbo` | `gpt-4.1-mini` | Deprecation warning logged | - -### Migration Steps - -#### 1. Check Your Current Model - -```bash -cat ~/.config/git-ai/config.ini | grep model -``` - -#### 2. 
Update to a Supported Model - -If you're using a deprecated model, update your configuration: - -```bash -# For best quality (default) -git ai config set model gpt-4.1 - -# For balanced speed and quality -git ai config set model gpt-4.1-mini - -# For maximum speed -git ai config set model gpt-4.1-nano - -# For complex commits -git ai config set model gpt-4.5 -``` - -### Behavior Changes - -#### Before -```bash -$ git ai config set model gpt-4o -✅ Model set to: gpt-4o -$ git commit --no-edit -# Uses gpt-4o directly -``` - -#### After -```bash -$ git ai config set model gpt-4o -✅ Model set to: gpt-4o -$ git commit --no-edit -⚠️ WARN: Model 'gpt-4o' is deprecated. Mapping to 'gpt-4.1'. - Please update your configuration with: git ai config set model gpt-4.1 -# Commit proceeds using gpt-4.1 -``` - -### Invalid Model Names - -If you configure an invalid model name: - -1. **Configuration**: Accepts any string (for forward compatibility) -2. **Runtime**: Logs an error and falls back to `gpt-4.1` (default) -3. **OpenAI API Error**: If OpenAI doesn't recognize the model, the error is logged before fallback - -```bash -$ git ai config set model does-not-exist -✅ Model set to: does-not-exist - -$ git commit --no-edit -❌ ERROR: Failed to parse model 'does-not-exist': Invalid model name: 'does-not-exist'. - Falling back to default model 'gpt-4.1'. -# Commit proceeds using gpt-4.1 -``` - -### Testing Your Configuration - -Test your model configuration without making a commit: - -```bash -# Set test mode -export RUST_LOG=debug - -# Make a dummy change -echo "test" >> test.txt -git add test.txt - -# Try committing (you can abort if needed) -git commit --no-edit - -# Check the logs for model information -# Should show: "Using model: gpt-4.1, Tokens: X, ..." -``` - -### Rollback - -If you need to rollback to the previous version: - -```bash -# Uninstall current version -cargo uninstall git-ai - -# Install specific older version (before model changes) -cargo install git-ai --version 1.0.8 -``` - -### Questions? - -- **Q: Will my old model configuration stop working?** - A: No, deprecated models automatically map to GPT-4.1 equivalents with a warning. - -- **Q: Why the change to GPT-4.1 models?** - A: GPT-4.1 models offer better performance, lower latency, and reduced costs while matching or exceeding the quality of previous models. - -- **Q: What if I want to use a model not in the list?** - A: Currently only the 4 GPT-4.1 family models are supported. Custom models will fall back to the default with an error logged. - -- **Q: Can I still use GPT-4o?** - A: Yes, but it will automatically map to GPT-4.1. Update your config to avoid deprecation warnings. From b2075126bc99ff95aeff54f8d454609701116490 Mon Sep 17 00:00:00 2001 From: Linus Oleander <220827+oleander@users.noreply.github.com> Date: Sun, 5 Oct 2025 23:22:02 +0200 Subject: [PATCH 10/10] Format model parse error log as single line --- src/model.rs | 14 ++++++-------- tests/model_validation_test.rs | 3 ++- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/model.rs b/src/model.rs index 90f0db22..5b759674 100644 --- a/src/model.rs +++ b/src/model.rs @@ -215,10 +215,11 @@ impl FromStr for Model { ); Ok(Model::GPT41Mini) } - model => bail!( - "Invalid model name: '{}'. Supported models: gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.5", - model - ) + model => + bail!( + "Invalid model name: '{}'. 
Supported models: gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.5", + model + ), } } } @@ -233,10 +234,7 @@ impl Display for Model { impl From<&str> for Model { fn from(s: &str) -> Self { s.parse().unwrap_or_else(|e| { - log::error!( - "Failed to parse model '{}': {}. Falling back to default model 'gpt-4.1'.", - s, e - ); + log::error!("Failed to parse model '{}': {}. Falling back to default model 'gpt-4.1'.", s, e); Model::default() }) } diff --git a/tests/model_validation_test.rs b/tests/model_validation_test.rs index c1ed29b7..6497b4e2 100644 --- a/tests/model_validation_test.rs +++ b/tests/model_validation_test.rs @@ -1,6 +1,7 @@ -use ai::model::Model; use std::str::FromStr; +use ai::model::Model; + #[test] fn test_valid_model_names() { // Test all supported model names
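The net behavior after the full series, in one hedged sketch (APIs exactly as introduced in the patches above):

```rust
use std::str::FromStr;
use ai::model::Model;

fn main() {
    // Strict parsing surfaces a descriptive error for unknown names...
    let err = Model::from_str("does-not-exist").unwrap_err();
    assert!(err.to_string().contains("Invalid model name"));

    // ...while the infallible From<&str> logs the failure and falls back.
    assert_eq!(Model::from("does-not-exist"), Model::GPT41);

    // Deprecated names keep working, mapped to GPT-4.1 equivalents.
    assert_eq!(Model::from_str("gpt-4o").unwrap(), Model::GPT41);
    assert_eq!(Model::from_str("gpt-3.5-turbo").unwrap(), Model::GPT41Mini);
}
```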