Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 48 additions & 2 deletions src/apps/desktop/src/api/commands.rs
Original file line number Diff line number Diff line change
Expand Up @@ -810,14 +810,23 @@ pub async fn test_ai_config_connection(
bitfun_core::service::config::types::ModelCategory::Multimodal
);

let ai_config = match request.config.try_into() {
let auth = request.config.auth.clone();
let mut ai_config: bitfun_core::util::types::AIConfig = match request.config.try_into() {
Ok(config) => config,
Err(e) => {
error!("Failed to convert AI config: {}", e);
return Err(format!("Failed to convert configuration: {}", e));
}
};

if let Err(e) =
bitfun_core::infrastructure::ai::client_factory::apply_cli_credential(&auth, &mut ai_config)
.await
{
error!("Failed to resolve CLI credential during test: {}", e);
return Err(format!("Failed to resolve CLI credential: {}", e));
}

let ai_client = bitfun_core::infrastructure::ai::AIClient::new(ai_config);

match ai_client.test_connection().await {
Expand Down Expand Up @@ -897,10 +906,14 @@ pub async fn list_ai_models_by_config(
request: ListAIModelsByConfigRequest,
) -> Result<Vec<bitfun_core::util::types::RemoteModelInfo>, String> {
let config_name = request.config.name.clone();
let ai_config = request
let auth = request.config.auth.clone();
let mut ai_config: bitfun_core::util::types::AIConfig = request
.config
.try_into()
.map_err(|e| format!("Failed to convert configuration: {}", e))?;
bitfun_core::infrastructure::ai::client_factory::apply_cli_credential(&auth, &mut ai_config)
.await
.map_err(|e| format!("Failed to resolve CLI credential: {}", e))?;
let ai_client = bitfun_core::infrastructure::ai::AIClient::new(ai_config);

ai_client.list_models().await.map_err(|e| {
Expand Down Expand Up @@ -2729,3 +2742,36 @@ pub async fn stop_file_watch(path: String) -> Result<(), String> {
pub async fn get_watched_paths() -> Result<Vec<String>, String> {
file_watch::get_watched_paths().await
}

/// Tauri command: scan this machine for credentials left behind by supported
/// CLI tools (see `CliCredentialKind` for the known kinds) and return them all.
///
/// Infallible in practice — discovery failures surface as an empty list, not
/// an `Err` — but keeps the `Result` shape Tauri commands conventionally use.
#[tauri::command]
pub async fn discover_cli_credentials() -> Result<Vec<bitfun_core::infrastructure::cli_credentials::DiscoveredCredential>, String> {
    let found = bitfun_core::infrastructure::cli_credentials::discover_all().await;
    Ok(found)
}

/// Payload for [`refresh_cli_credential`]: selects which CLI tool's
/// credential should be force-refreshed. Deserialized from the frontend with
/// camelCase field names.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RefreshCliCredentialRequest {
    // Which CLI credential to refresh (Codex or Gemini, per the match in
    // `refresh_cli_credential`).
    pub kind: bitfun_core::infrastructure::cli_credentials::CliCredentialKind,
}

/// Tauri command: force-refresh one CLI credential, then return its
/// freshly-discovered metadata.
///
/// Calling `resolve()` performs the actual refresh; its return value is
/// deliberately discarded because `discover_all()` yields the richer
/// `DiscoveredCredential` metadata the frontend displays.
///
/// # Errors
/// Returns `Err` if the resolver fails, or if the credential is no longer
/// discoverable after the refresh.
#[tauri::command]
pub async fn refresh_cli_credential(
    request: RefreshCliCredentialRequest,
) -> Result<bitfun_core::infrastructure::cli_credentials::DiscoveredCredential, String> {
    use bitfun_core::infrastructure::cli_credentials::{
        codex::CodexResolver, gemini::GeminiResolver, CliCredentialKind, CredentialResolver,
    };
    // Force a refresh by calling resolve(); propagate failure immediately
    // instead of binding the Result and inspecting it afterwards.
    match request.kind {
        CliCredentialKind::Codex => CodexResolver.resolve().await,
        CliCredentialKind::Gemini => GeminiResolver.resolve().await,
    }
    .map_err(|e| format!("Refresh failed: {}", e))?;

    // Re-discover so the caller gets the latest metadata for the refreshed entry.
    bitfun_core::infrastructure::cli_credentials::discover_all()
        .await
        .into_iter()
        .find(|c| c.kind == request.kind)
        .ok_or_else(|| "Credential not found after refresh".to_string())
}
2 changes: 2 additions & 0 deletions src/apps/desktop/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -348,6 +348,8 @@ pub async fn run() {
test_ai_connection,
test_ai_config_connection,
list_ai_models_by_config,
discover_cli_credentials,
refresh_cli_credential,
initialize_ai,
set_agent_model,
get_agent_models,
Expand Down
4 changes: 4 additions & 0 deletions src/crates/ai-adapters/src/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,9 @@ impl AIClient {
ApiFormat::Gemini => {
gemini::request::send_stream(self, messages, tools, extra_body, max_tries).await
}
ApiFormat::GeminiCodeAssist => {
gemini::code_assist::send_stream(self, messages, tools, extra_body, max_tries).await
}
}
}

Expand Down Expand Up @@ -145,6 +148,7 @@ impl AIClient {
}
ApiFormat::Anthropic => anthropic::discovery::list_models(self).await,
ApiFormat::Gemini => gemini::discovery::list_models(self).await,
ApiFormat::GeminiCodeAssist => gemini::code_assist::list_models(self).await,
}
}
}
Expand Down
7 changes: 7 additions & 0 deletions src/crates/ai-adapters/src/client/format.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@ pub(crate) enum ApiFormat {
OpenAIResponses,
Anthropic,
Gemini,
/// Google Cloud Code Assist (`cloudcode-pa.googleapis.com`) used by
/// `gemini-cli` in personal-OAuth mode. The wire format is the regular
/// Gemini body, but wrapped as `{ "model", "project", "request": { ... } }`.
GeminiCodeAssist,
}

impl ApiFormat {
Expand All @@ -16,6 +20,9 @@ impl ApiFormat {
"response" | "responses" => Ok(Self::OpenAIResponses),
"anthropic" => Ok(Self::Anthropic),
"gemini" | "google" => Ok(Self::Gemini),
"gemini-code-assist" | "gemini_code_assist" | "code-assist" => {
Ok(Self::GeminiCodeAssist)
}
_ => Err(anyhow!("Unknown API format: {}", value)),
}
}
Expand Down
209 changes: 209 additions & 0 deletions src/crates/ai-adapters/src/providers/gemini/code_assist.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
//! Google Cloud Code Assist transport (`cloudcode-pa.googleapis.com`).
//!
//! Used by `gemini-cli` after a personal Google login. The endpoint accepts the
//! regular Gemini request body but wrapped in
//! `{ "model": "...", "project": "...", "request": { ... } }` and authenticated
//! with a Bearer access_token (we don't pass `x-goog-api-key`).

use super::{request as gemini_request, GeminiMessageConverter};
use crate::client::sse::execute_sse_request;
use crate::client::{AIClient, StreamResponse};
use crate::providers::shared;
use crate::stream::handle_gemini_stream;
use crate::types::{Message, RemoteModelInfo, ToolDefinition};
use anyhow::{anyhow, Result};
use log::debug;
use reqwest::RequestBuilder;
use serde::Deserialize;
use std::sync::OnceLock;
use tokio::sync::Mutex;

/// Base URL of the Cloud Code Assist service used by `gemini-cli` OAuth sessions.
const CODE_ASSIST_BASE: &str = "https://cloudcode-pa.googleapis.com";
/// SSE streaming chat endpoint (`alt=sse` selects server-sent events framing).
const STREAM_ENDPOINT: &str = "/v1internal:streamGenerateContent?alt=sse";
/// Returns the account's existing companion project, if any.
const LOAD_CODE_ASSIST_ENDPOINT: &str = "/v1internal:loadCodeAssist";
/// Creates a free-tier companion project when none exists yet.
const ONBOARD_USER_ENDPOINT: &str = "/v1internal:onboardUser";

/// Process-wide cache holding the resolved Code Assist project id.
///
/// Lazily initialized on first access; the `tokio` mutex lets async callers
/// hold the guard across `.await` points while filling the cache.
fn cached_project() -> &'static Mutex<Option<String>> {
    static CACHE: OnceLock<Mutex<Option<String>>> = OnceLock::new();
    CACHE.get_or_init(Default::default)
}

/// Attach the headers every Code Assist request needs.
///
/// Auth is a `Bearer` access token (the config's `api_key` slot carries the
/// OAuth token here); no `x-goog-api-key` header is sent. Routed through
/// `shared::apply_header_policy` so client-wide header rules still apply.
pub(crate) fn apply_headers(client: &AIClient, builder: RequestBuilder) -> RequestBuilder {
    shared::apply_header_policy(client, builder, |b| {
        let bearer = format!("Bearer {}", client.config.api_key);
        b.header("Content-Type", "application/json")
            .header("Authorization", bearer)
            .header("User-Agent", "BitFun-CodeAssist/1.0")
    })
}

/// Subset of the `loadCodeAssist` response we care about.
#[derive(Debug, Deserialize)]
struct LoadCodeAssistResponse {
    // Existing companion project id, if the account already has one.
    // `default` tolerates the field being absent entirely.
    #[serde(default, rename = "cloudaicompanionProject")]
    cloudaicompanion_project: Option<String>,
}

/// `onboardUser` returns a long-running-operation envelope; we only read the
/// completion flag and the final response payload.
#[derive(Debug, Deserialize)]
struct OnboardOperation {
    // `true` once the operation has finished; absent/false means still running.
    #[serde(default)]
    done: Option<bool>,
    // Present only when `done` is true.
    #[serde(default)]
    response: Option<OnboardResponse>,
}

/// Payload inside a completed `onboardUser` operation.
#[derive(Debug, Deserialize)]
struct OnboardResponse {
    // The newly-created (or pre-existing) companion project.
    #[serde(default, rename = "cloudaicompanionProject")]
    cloudaicompanion_project: Option<OnboardProject>,
}

/// Project descriptor from onboarding; only the id is used downstream.
#[derive(Debug, Deserialize)]
struct OnboardProject {
    #[serde(default)]
    id: Option<String>,
}

/// Resolve the Google Cloud project id required by every Code Assist call.
///
/// Precedence:
/// 1. the process-wide cache (filled by a previous call),
/// 2. the `GOOGLE_CLOUD_PROJECT` environment variable,
/// 3. an existing companion project reported by `loadCodeAssist`,
/// 4. onboarding a new free-tier project via `onboardUser`.
///
/// NOTE(review): the cache is a single process-wide slot not keyed by account
/// or config — if the user switches Google accounts at runtime a stale project
/// id could be served. Confirm whether that scenario matters here.
///
/// # Errors
/// Returns `Err` on any non-success HTTP status, on malformed response JSON,
/// or if onboarding does not complete within a single call.
async fn discover_project(client: &AIClient) -> Result<String> {
    // Fast path: already resolved earlier in this process.
    {
        let guard = cached_project().lock().await;
        if let Some(p) = guard.clone() {
            return Ok(p);
        }
    }

    // Explicit override via environment variable; empty string is ignored.
    if let Ok(env_project) = std::env::var("GOOGLE_CLOUD_PROJECT") {
        if !env_project.is_empty() {
            *cached_project().lock().await = Some(env_project.clone());
            return Ok(env_project);
        }
    }

    // Client metadata both Code Assist endpoints expect in the body.
    let metadata = serde_json::json!({
        "ideType": "IDE_UNSPECIFIED",
        "platform": "PLATFORM_UNSPECIFIED",
        "pluginType": "GEMINI",
    });

    // Ask the service whether the account already has a companion project.
    let load_url = format!("{}{}", CODE_ASSIST_BASE, LOAD_CODE_ASSIST_ENDPOINT);
    let load_body = serde_json::json!({ "metadata": metadata });
    let load_resp = apply_headers(client, client.client.post(&load_url))
        .json(&load_body)
        .send()
        .await?;
    let load_status = load_resp.status();
    if !load_status.is_success() {
        // Include the response body in the error to aid debugging.
        let body = load_resp.text().await.unwrap_or_default();
        return Err(anyhow!(
            "loadCodeAssist failed: HTTP {load_status}: {body}"
        ));
    }
    let load_parsed: LoadCodeAssistResponse = load_resp.json().await?;
    if let Some(project) = load_parsed.cloudaicompanion_project.filter(|s| !s.is_empty()) {
        *cached_project().lock().await = Some(project.clone());
        return Ok(project);
    }

    // Need to onboard – create a free-tier Code Assist project.
    let onboard_url = format!("{}{}", CODE_ASSIST_BASE, ONBOARD_USER_ENDPOINT);
    let onboard_body = serde_json::json!({
        "tierId": "free-tier",
        "metadata": metadata,
    });
    let onboard_resp = apply_headers(client, client.client.post(&onboard_url))
        .json(&onboard_body)
        .send()
        .await?;
    let onboard_status = onboard_resp.status();
    if !onboard_status.is_success() {
        let body = onboard_resp.text().await.unwrap_or_default();
        return Err(anyhow!(
            "onboardUser failed: HTTP {onboard_status}: {body}"
        ));
    }
    // onboardUser is a long-running operation; we only handle the case where
    // it completes immediately (no polling loop here).
    let parsed: OnboardOperation = onboard_resp.json().await?;
    if !parsed.done.unwrap_or(false) {
        return Err(anyhow!("onboardUser did not complete in a single call"));
    }
    let project = parsed
        .response
        .and_then(|r| r.cloudaicompanion_project)
        .and_then(|p| p.id)
        .ok_or_else(|| anyhow!("onboardUser response missing project id"))?;
    *cached_project().lock().await = Some(project.clone());
    Ok(project)
}

/// Stream a generation request through the Code Assist endpoint.
///
/// Reuses the regular Gemini message/tool converters and request-body builder,
/// then wraps the result as `{ "model", "project", "request" }` — the envelope
/// the `v1internal` API requires. Responses are standard Gemini SSE frames, so
/// the ordinary `handle_gemini_stream` processor is reused unchanged.
///
/// # Errors
/// Propagates project discovery failures and SSE transport errors.
pub(crate) async fn send_stream(
    client: &AIClient,
    messages: Vec<Message>,
    tools: Option<Vec<ToolDefinition>>,
    extra_body: Option<serde_json::Value>,
    max_tries: usize,
) -> Result<StreamResponse> {
    // Must know the companion project before any request can be made.
    let project = discover_project(client).await?;

    // Build the ordinary Gemini body first...
    let (system_instruction, contents) =
        GeminiMessageConverter::convert_messages(messages, &client.config.model);
    let gemini_tools = GeminiMessageConverter::convert_tools(tools);
    let inner = gemini_request::build_request_body(
        client,
        system_instruction,
        contents,
        gemini_tools,
        extra_body,
    );

    // ...then wrap it in the Code Assist envelope.
    let request_body = serde_json::json!({
        "model": client.config.model,
        "project": project,
        "request": inner,
    });

    // An explicitly-configured request_url overrides the default endpoint.
    let url = if client.config.request_url.is_empty() {
        format!("{}{}", CODE_ASSIST_BASE, STREAM_ENDPOINT)
    } else {
        client.config.request_url.clone()
    };

    debug!(
        "Gemini Code Assist config: model={}, request_url={}, project={}, max_tries={}",
        client.config.model, url, project, max_tries
    );

    let idle_timeout = client.stream_options.idle_timeout;
    execute_sse_request(
        "Gemini Code Assist Streaming API",
        &url,
        &request_body,
        max_tries,
        || apply_headers(client, client.client.post(&url)),
        move |response, tx, tx_raw| {
            // Frames are regular Gemini SSE, so the stock handler applies.
            tokio::spawn(handle_gemini_stream(response, tx, tx_raw, idle_timeout));
        },
    )
    .await
}

/// Code Assist (`cloudcode-pa.googleapis.com`) does not expose a list-models
/// endpoint; the upstream `gemini-cli` ships a hard-coded `VALID_GEMINI_MODELS`
/// set in `packages/core/src/config/models.ts`. We mirror its stable entries so
/// the BitFun model picker shows exactly what the CLI itself allows.
pub(crate) async fn list_models(_client: &AIClient) -> Result<Vec<RemoteModelInfo>> {
    // (model id, display name) pairs mirrored from gemini-cli's stable set.
    const MODELS: [(&str, &str); 3] = [
        ("gemini-2.5-pro", "Gemini 2.5 Pro"),
        ("gemini-2.5-flash", "Gemini 2.5 Flash"),
        ("gemini-2.5-flash-lite", "Gemini 2.5 Flash-Lite"),
    ];
    Ok(MODELS
        .iter()
        .map(|&(id, name)| RemoteModelInfo {
            id: id.to_string(),
            display_name: Some(name.to_string()),
        })
        .collect())
}
1 change: 1 addition & 0 deletions src/crates/ai-adapters/src/providers/gemini/mod.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
//! Gemini provider module

pub mod code_assist;
pub mod discovery;
pub mod message_converter;
pub mod request;
Expand Down
Loading
Loading