10 changes: 10 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -26,6 +26,7 @@ reqwest = { version = "^0.13", features = ["json", "blocking"] }
serde = { version = "^1", features = ["derive"] }
serde_json = "^1"
toml = "^1.0"
commitbot_macros = { version = "0.1.0", path = "commitbot-macros" }

[dev-dependencies]
assert_cmd = "^2.1.1"
12 changes: 10 additions & 2 deletions Taskfile.yaml
@@ -29,9 +29,9 @@ tasks:
- cargo test --test prompt_live -- --ignored --nocapture

run:simple:
desc: "Run commitbot in simple mode (no model, debug) using cargo run"
desc: "Run commitbot in simple mode (no model, info verbosity) using cargo run"
cmds:
- cargo run -- -vv
- cargo run -- -v

run:ask:
desc: "Run commitbot in interactive ask mode (no model, debug)"
@@ -53,8 +53,16 @@ tasks:
install:
desc: "Install commitbot binary locally via cargo install --path ."
cmds:
- brew uninstall mikegarde/tap/commitbot || true
- cargo install --path . --force

install:brew:
desc: "Install commitbot via Homebrew (macOS)"
cmds:
- cargo uninstall commitbot || true
- brew uninstall mikegarde/tap/commitbot || true
- brew install mikegarde/tap/commitbot

build:release:
desc: "Build optimized release binary for the host"
silent: true
13 changes: 13 additions & 0 deletions commitbot-macros/Cargo.toml
@@ -0,0 +1,13 @@
[package]
name = "commitbot_macros"
version = "0.1.0"
edition = "2024"

[lib]
proc-macro = true

[dependencies]
proc-macro2 = "1"
quote = "1"
syn = { version = "2", features = ["full"] }

44 changes: 44 additions & 0 deletions commitbot-macros/src/lib.rs
@@ -0,0 +1,44 @@
use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput, Data, DataStruct, Fields};

#[proc_macro_derive(SensitiveFields, attributes(sensitive))]
pub fn derive_sensitive_fields(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
let name = input.ident;

let mut sensitive_fields: Vec<String> = Vec::new();

if let Data::Struct(DataStruct { fields: Fields::Named(ref fields_named), .. }) = input.data {
for field in fields_named.named.iter() {
for attr in &field.attrs {
if let Some(ident) = attr.path().get_ident() {
if ident == "sensitive" {
if let Some(ident_field) = &field.ident {
sensitive_fields.push(ident_field.to_string());
}
}
}
}
}
}

// build an array of string literals
let literals: Vec<proc_macro2::TokenStream> = sensitive_fields
.iter()
.map(|s| {
let lit = syn::LitStr::new(s, proc_macro2::Span::call_site());
quote! { #lit }
})
.collect();

let expanded = quote! {
impl #name {
pub fn sensitive_field_names() -> &'static [&'static str] {
&[#(#literals),*]
}
}
};

TokenStream::from(expanded)
}
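For context (not part of the diff): a minimal usage sketch of the derive above, assuming the macro exactly as committed. The `Example` struct and its fields are hypothetical names used only for illustration.

```rust
// Hypothetical struct using the SensitiveFields derive from commitbot-macros.
use commitbot_macros::SensitiveFields;

#[derive(SensitiveFields)]
struct Example {
    #[sensitive]
    api_key: Option<String>,
    base_url: String,
}

fn main() {
    // The derive emits an inherent fn listing only the fields tagged #[sensitive].
    assert_eq!(Example::sensitive_field_names(), &["api_key"]);
}
```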
39 changes: 34 additions & 5 deletions src/config.rs
@@ -8,11 +8,12 @@ use std::fs;
use std::path::{Path, PathBuf};

/// Final resolved configuration for commitbot.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, commitbot_macros::SensitiveFields)]
pub struct Config {
/// LLM provider (openai, ollama)
pub provider: String,
/// OpenAI API key for authentication
#[sensitive]
pub openai_api_key: Option<String>,
/// Base URL for the LLM provider
pub base_url: Option<String>,
@@ -253,24 +254,52 @@ impl<'a> ConfigResolver<'a> {
}

fn log_decision<T: std::fmt::Debug>(&self, key: &str, value: &T, src: ValueSource) {
// Redact sensitive keys (API keys, tokens, secrets) when printing
// so verbose/debug logs don't leak credentials. For option-like
// values we print <set>/<unset>, otherwise we print <redacted>.
let printable = if self.is_sensitive_key(key) {
// Try to detect Option<T> formatted output ("Some(...)" / "None").
let debug_str = format!("{:?}", value);
if debug_str == "None" {
"<unset>".to_string()
} else if debug_str.starts_with("Some(") {
"<set>".to_string()
} else {
"<redacted>".to_string()
}
} else {
format!("{:?}", value)
};

if let Some(env_key) = self.env_key_for(key) {
log::debug!(
"Config: {} = {:?} (source={}, env={})",
"Config: {} = {} (source={}, env={})",
key,
value,
printable,
source_label(src),
env_key
);
} else {
log::debug!(
"Config: {} = {:?} (source={})",
"Config: {} = {} (source={})",
key,
value,
printable,
source_label(src)
);
}
}

// Treat a small set of keys as sensitive so logs redact them.
// Add other names here if you later introduce more secrets.
// Currently only the OpenAI API key is treated as sensitive. We keep the
// check centralized here so it can later be expanded. The field in the
// structs is annotated with a doc comment to indicate sensitivity.
fn is_sensitive_key(&self, key: &str) -> bool {
// Use the generated list from the Config struct (proc-macro)
// to determine which fields are sensitive.
Config::sensitive_field_names().contains(&key)
}

fn log_decision_secret_opt_string(&self, key: &str, present: bool, src: ValueSource) {
let printable = if present { "<set>" } else { "<unset>" };
if let Some(env_key) = self.env_key_for(key) {
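Aside (not part of the diff): a standalone sketch of the redaction rule used in `log_decision` above — it keys off the `Debug` output, so `Option` values collapse to `<set>`/`<unset>` and any other sensitive value becomes `<redacted>`. The function name below is made up for illustration.

```rust
// Illustrative restatement of the redaction logic; redact_debug is a hypothetical name.
fn redact_debug<T: std::fmt::Debug>(value: &T) -> String {
    let debug_str = format!("{:?}", value);
    if debug_str == "None" {
        "<unset>".to_string()
    } else if debug_str.starts_with("Some(") {
        "<set>".to_string()
    } else {
        "<redacted>".to_string()
    }
}

fn main() {
    assert_eq!(redact_debug(&None::<String>), "<unset>");
    assert_eq!(redact_debug(&Some("sk-abc123")), "<set>");
    assert_eq!(redact_debug(&"plain-token"), "<redacted>");
}
```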
5 changes: 5 additions & 0 deletions src/llm/mod.rs
@@ -40,4 +40,9 @@ pub trait LlmClient: Send + Sync {
items: &[PrItem],
ticket_summary: Option<&str>,
) -> Result<String>;

/// Take aggregated token usage from the client, resetting counters.
fn take_and_reset_usage(&self) -> Option<(u64, u64, u64)> {
None
}
}
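For context (not part of the diff): a sketch of how a caller could consume the new default method after a generation. The `report_usage` helper is hypothetical; clients that do not track usage simply fall through to the `None` default.

```rust
// Hypothetical caller-side helper; works with any LlmClient implementation.
fn report_usage(client: &dyn LlmClient) {
    if let Some((prompt, completion, total)) = client.take_and_reset_usage() {
        log::info!("token usage: prompt={prompt}, completion={completion}, total={total}");
    }
}
```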
69 changes: 64 additions & 5 deletions src/llm/ollama.rs
@@ -5,6 +5,7 @@ use musli::{Decode, Encode};
use reqwest::StatusCode;
use reqwest::blocking::Client;
use std::io::BufReader;
use std::sync::Mutex;

use crate::FileChange;
use crate::git::{PrItem, PrSummaryMode};
@@ -29,6 +30,13 @@ struct OllamaStreamResponse {
done: Option<bool>,
}

#[derive(Debug, Decode)]
struct OllamaUsage {
prompt_tokens: Option<u32>,
completion_tokens: Option<u32>,
total_tokens: Option<u32>,
}

#[derive(Debug, Decode)]
struct OllamaTagModel {
name: String,
@@ -45,6 +53,14 @@ pub struct OllamaClient {
base_url: String,
model: String,
stream: bool,
usage: Mutex<TokenUsage>,
}

#[derive(Default)]
struct TokenUsage {
prompt_tokens: u64,
completion_tokens: u64,
total_tokens: u64,
}

impl OllamaClient {
@@ -57,6 +73,7 @@ impl OllamaClient {
base_url: base_url.into().trim_end_matches('/').to_string(),
model: model.into(),
stream,
usage: Mutex::new(TokenUsage::default()),
}
}

@@ -119,10 +136,36 @@

log::trace!("Ollama raw JSON response: {resp_text}");

let parsed: OllamaChatResponse =
json::from_str(&resp_text).map_err(|e| anyhow!("Failed to decode Ollama JSON: {e}"))?;
#[derive(Debug, Decode)]
struct OllamaChatWrapper {
message: OllamaMessage,
usage: Option<OllamaUsage>,
}

Ok(parsed.message.content.trim().to_string())
// First try decoding into the wrapper. If that fails fall back to the
// older shape without usage.
match json::from_str::<OllamaChatWrapper>(&resp_text) {
Ok(parsed) => {
if let Some(usage) = parsed.usage {
// Recover from a poisoned mutex instead of panicking so the CLI
// can continue in the face of concurrent thread panics.
let mut u = self.usage.lock().unwrap_or_else(|e| {
log::warn!("usage mutex was poisoned, recovering token counters");
e.into_inner()
});
u.prompt_tokens += usage.prompt_tokens.unwrap_or(0) as u64;
u.completion_tokens += usage.completion_tokens.unwrap_or(0) as u64;
u.total_tokens += usage.total_tokens.unwrap_or(0) as u64;
}
Ok(parsed.message.content.trim().to_string())
}
Err(_) => {
// Fallback to the simple response shape.
let parsed: OllamaChatResponse = json::from_str(&resp_text)
.map_err(|e| anyhow!("Failed to decode Ollama JSON: {e}"))?;
Ok(parsed.message.content.trim().to_string())
}
}
}

fn tags_url(&self) -> String {
@@ -222,7 +265,8 @@ impl LlmClient for OllamaClient {
ticket_summary: Option<&str>,
) -> Result<String> {
let prompts = prompt_builder::commit_message_prompt(branch, files, ticket_summary);
self.chat(prompts.system, prompts.user, self.stream)
let content = self.chat(prompts.system, prompts.user, self.stream)?;
Ok(content)
}

fn generate_pr_message(
Expand All @@ -240,7 +284,22 @@ impl LlmClient for OllamaClient {
items,
ticket_summary,
);
self.chat(prompts.system, prompts.user, self.stream)
let content = self.chat(prompts.system, prompts.user, self.stream)?;
Ok(content)
}
fn take_and_reset_usage(&self) -> Option<(u64, u64, u64)> {
let mut u = self.usage.lock().unwrap_or_else(|e| {
log::warn!("usage mutex was poisoned, recovering token counters");
e.into_inner()
});

if u.total_tokens > 0 {
let res = (u.prompt_tokens, u.completion_tokens, u.total_tokens);
*u = TokenUsage::default();
Some(res)
} else {
None
}
}
}

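Closing aside (not part of the diff): a self-contained sketch of the accumulate/take-and-reset pattern the Ollama client adopts above — counters behind a `Mutex`, recovery from a poisoned lock instead of a panic, and a one-shot drain. All names here are hypothetical stand-ins for the committed code.

```rust
use std::sync::Mutex;

// Hypothetical stand-in for the TokenUsage counters in ollama.rs.
#[derive(Default)]
struct Counters {
    prompt: u64,
    completion: u64,
    total: u64,
}

struct Tracker {
    usage: Mutex<Counters>,
}

impl Tracker {
    // Accumulate usage from one response; recover the guard if the lock was poisoned.
    fn add(&self, prompt: u64, completion: u64, total: u64) {
        let mut u = self.usage.lock().unwrap_or_else(|e| e.into_inner());
        u.prompt += prompt;
        u.completion += completion;
        u.total += total;
    }

    // Hand the totals out once and reset, mirroring take_and_reset_usage above.
    fn take_and_reset(&self) -> Option<(u64, u64, u64)> {
        let mut u = self.usage.lock().unwrap_or_else(|e| e.into_inner());
        if u.total > 0 {
            let res = (u.prompt, u.completion, u.total);
            *u = Counters::default();
            Some(res)
        } else {
            None
        }
    }
}

fn main() {
    let tracker = Tracker { usage: Mutex::new(Counters::default()) };
    tracker.add(10, 5, 15);
    assert_eq!(tracker.take_and_reset(), Some((10, 5, 15)));
    assert_eq!(tracker.take_and_reset(), None); // drained, nothing left to report
}
```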