Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
268 changes: 151 additions & 117 deletions src/llm_client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,27 +4,38 @@ import { GeminiSettings } from './gemini_settings';
import { GoogleGenerativeAI, GenerativeModel } from "@google/generative-ai";
import { ChatCompletion, ChatCompletionCreateParams } from 'openai/resources/chat/completions';
import { ReadableStream as PolyfillReadableStream } from 'web-streams-polyfill';
import { TextEncoder, TextDecoder } from 'util';
import fs from 'fs';

const ReadableStream = globalThis.ReadableStream || PolyfillReadableStream;

/**
 * Render an API key for logging as `first5...last3`, masking the middle.
 *
 * BUG FIX: the original returned keys shorter than 8 characters verbatim,
 * leaking the full secret into logs; and for 8–11 character keys the
 * `first5...last3` form revealed all or nearly all of the key. Any key
 * shorter than 12 characters is now fully redacted instead.
 *
 * @param key - the secret to mask; may be null/undefined/empty
 * @returns a log-safe representation that never contains the whole key
 */
const getPartialKey = (key: string | undefined | null): string => {
  if (!key) {
    return "undefined/empty";
  }
  // Too short to mask meaningfully (fewer than 4 chars would be hidden).
  if (key.length < 12) {
    return "***";
  }
  return `${key.substring(0, 5)}...${key.substring(key.length - 3)}`;
};
// Types for conversation history

/**
 * A single chat turn in provider-neutral (OpenAI-style) form.
 * Gemini's 'model' role is normalized to 'assistant' by
 * convertGeminiToStandardHistory before messages reach this shape.
 */
interface ConversationMessage {
// Speaker role, e.g. 'user' or 'assistant'.
role: string;
// Plain message text.
content: string;
}

/**
 * One turn in the shape accepted by Gemini's startChat({ history }):
 * a role plus an array of text parts (see convertToGeminiHistory).
 *
 * NOTE(review): convertGeminiToStandardHistory reads file parts as
 * { type, content }, which does not match this { text } shape — confirm
 * which format the on-disk 'gemini_conversation' file actually uses.
 */
interface GeminiHistoryItem {
role: string;
parts: Array<{ text: string }>;
}

/**
 * On-disk JSON shape for a saved conversation.
 * `format` selects how `conversation_history` is interpreted:
 * 'openai_conversation' -> ConversationMessage[] (used as-is);
 * 'gemini_conversation' -> provider-specific items (converted on load).
 *
 * NOTE(review): the `| any[]` arm disables type checking for the Gemini
 * case — consider declaring a dedicated interface for the file's Gemini
 * item shape once it is confirmed.
 */
interface ConversationFile {
conversation_history: ConversationMessage[] | any[];
// Discriminator: 'openai_conversation' or 'gemini_conversation'.
format: string;
// Schema version string of the file.
version: string;
}

export class LlmClient {
public openai: OpenAi | null;
public gemini: GoogleGenerativeAI | null;
public model: string;
public openaiSettings: OpenAiSettings | null;
public geminiSettings: GeminiSettings | null;
private conversationHistory: ConversationMessage[] = [];

constructor(settings: OpenAiSettings | GeminiSettings) {
if ('url' in settings) {
// It's OpenAiSettings
// OpenAI Settings
this.openaiSettings = settings;
this.geminiSettings = null;
this.openai = new OpenAi({
Expand All @@ -33,153 +44,176 @@ export class LlmClient {
});
this.gemini = null;
} else {
// It's GeminiSettings
// Gemini Settings
this.geminiSettings = settings;
this.openaiSettings = null;

// EXTENSIVE DEBUG LOGGING
const envApiKey = process.env.GOOGLE_API_KEY;
const yamlApiKey = settings.token;

console.log(`[DEBUG_ENV] All environment variables:`, Object.keys(process.env).filter(k => k.includes('API') || k.includes('KEY') || k.includes('GOOGLE')));
console.log(`[DEBUG_ENV] GOOGLE_API_KEY exists:`, envApiKey !== undefined);
console.log(`[DEBUG_ENV] GOOGLE_API_KEY value:`, getPartialKey(envApiKey));
console.log(`[DEBUG_YAML] YAML token value:`, getPartialKey(yamlApiKey));

// Use YAML config by default (since local works with YAML)
const effectiveApiKey = yamlApiKey;

console.log(`[DEBUG_EFFECTIVE] Using API key:`, getPartialKey(effectiveApiKey));
console.log(`[DEBUG_EFFECTIVE] Key length:`, effectiveApiKey?.length);
console.log(`[DEBUG_EFFECTIVE] Key starts with AIza:`, effectiveApiKey?.startsWith('AIza'));

if (!effectiveApiKey || effectiveApiKey.trim() === "") {
throw new Error("Gemini API key is missing");
this.gemini = new GoogleGenerativeAI(settings.token);
this.openai = null;
}
this.model = settings.model;
}

/**
 * Load conversation history from a JSON file into memory.
 *
 * Recognized formats: 'openai_conversation' (used as-is) and
 * 'gemini_conversation' (converted to the standard shape). A missing file
 * is a no-op; on any read/parse error the history is reset to empty
 * rather than left half-loaded.
 *
 * NOTE(review): this span was diff-garbled in the source view; body
 * reconstructed from the merged-state lines — verify against the merged file.
 *
 * @param filePath - path to the saved conversation JSON file
 */
loadConversationHistory(filePath: string): void {
  try {
    if (!fs.existsSync(filePath)) {
      console.log(`[CONVERSATION] History file not found: ${filePath}`);
      return;
    }

    const fileContent = fs.readFileSync(filePath, 'utf-8');
    const conversationFile: ConversationFile = JSON.parse(fileContent);

    console.log(`[CONVERSATION] Loading history format: ${conversationFile.format}`);
    console.log(`[CONVERSATION] History messages: ${conversationFile.conversation_history.length}`);

    if (conversationFile.format === 'openai_conversation') {
      this.conversationHistory = conversationFile.conversation_history as ConversationMessage[];
    } else if (conversationFile.format === 'gemini_conversation') {
      // Convert Gemini format to standardized format
      this.conversationHistory = this.convertGeminiToStandardHistory(conversationFile.conversation_history);
    } else {
      // Unknown format: warn instead of silently keeping the previous history.
      console.warn(`[CONVERSATION] Unknown history format: ${conversationFile.format}`);
    }

    console.log(`[CONVERSATION] Loaded ${this.conversationHistory.length} messages`);
  } catch (error) {
    console.error(`[CONVERSATION] Error loading history: ${error}`);
    this.conversationHistory = [];
  }
}

this.model = settings.model;
/**
 * Convert Gemini conversation format to the standard message format.
 *
 * Only parts flagged `type === 'text'` contribute; their contents are
 * joined with blank lines. The 'model' role becomes 'assistant', other
 * roles pass through unchanged, and turns whose resulting content is
 * blank are dropped.
 */
private convertGeminiToStandardHistory(geminiHistory: any[]): ConversationMessage[] {
  const converted: ConversationMessage[] = [];
  for (const entry of geminiHistory) {
    const pieces: string[] = [];
    for (const part of entry.parts ?? []) {
      if (part.type === 'text') {
        pieces.push(part.content);
      }
    }
    converted.push({
      role: entry.role === 'model' ? 'assistant' : entry.role,
      content: pieces.join('\n\n'),
    });
  }
  return converted.filter((msg) => msg.content.trim().length > 0);
}

/**
 * Convert standard history to Gemini's { role, parts } format.
 *
 * 'assistant' maps to Gemini's 'model'; every other role (including any
 * 'system' messages) is sent as 'user'.
 */
private convertToGeminiHistory(messages: ConversationMessage[]): GeminiHistoryItem[] {
  const history: GeminiHistoryItem[] = [];
  for (const message of messages) {
    const geminiRole = message.role === 'assistant' ? 'model' : 'user';
    history.push({ role: geminiRole, parts: [{ text: message.content }] });
  }
  return history;
}

private completionParams(model: string, content: string): any {
return {
model: model,
messages: [{ role: 'user', content: content }],
};
/**
 * Build the full conversation: stored history followed by the new messages.
 * Returns a fresh array; neither input is mutated.
 */
private mergeWithHistory(newMessages: ConversationMessage[]): ConversationMessage[] {
  return this.conversationHistory.concat(newMessages);
}

async chatCompletionStreaming(content: string): Promise<any> {
async chatCompletionStreaming(messages: ConversationMessage[]): Promise<any> {
const fullConversation = this.mergeWithHistory(messages);
console.log(`[CONVERSATION] Total messages (with history): ${fullConversation.length}`);

if (this.openai) {
const params: ChatCompletionCreateParams = {
model: this.model,
messages: [{ role: 'user', content: content }],
messages: fullConversation as any,
stream: true
};
const resp = await this.openai.chat.completions.create(params);
return resp.toReadableStream() as any;
} else if (this.gemini) {
console.log(`[DEBUG_STREAM] Starting streaming with model: ${this.model}`);
console.log(`[DEBUG_STREAM] Content: ${content.substring(0, 50)}...`);

try {
const model: GenerativeModel = this.gemini.getGenerativeModel({ model: this.model });
console.log(`[DEBUG_STREAM] Model instance obtained`);

const chat = model.startChat({ history: [] });
console.log(`[DEBUG_STREAM] Chat started`);
// Convert to Gemini format and separate history from new message
const geminiHistory = this.convertToGeminiHistory(fullConversation.slice(0, -1));
const lastMessage = fullConversation[fullConversation.length - 1];

console.log(`[GEMINI] Starting chat with ${geminiHistory.length} history messages`);

console.log(`[DEBUG_STREAM] About to call sendMessageStream...`);
const result = await chat.sendMessageStream([{ text: content }]);
console.log(`[DEBUG_STREAM] sendMessageStream succeeded`);
const chat = model.startChat({
history: geminiHistory
});

const result = await chat.sendMessageStream([{ text: lastMessage.content }]);
return result.stream as any;
} catch (e: any) {
console.error(`[DEBUG_STREAM_ERROR] Stream error:`, e.message);
console.error(`[DEBUG_STREAM_ERROR] Error details:`, JSON.stringify(e.errorDetails || {}, null, 2));
console.error(`[DEBUG_STREAM_ERROR] Error cause:`, JSON.stringify(e.cause || {}, null, 2));
console.error(`[DEBUG_STREAM_ERROR] Full error:`, e);
console.error(`[GEMINI] Streaming error:`, e.message);
throw e;
}
} else {
throw new Error(`Unsupported provider`);
}
}

/**
 * Create an OpenAI chat completion and adapt it into a web ReadableStream
 * of UTF-8 encoded content deltas.
 *
 * @param params - ChatCompletionCreateParams; `stream: true` is expected so
 *                 the SDK response is async-iterable
 * @returns a ReadableStream of encoded delta text, or null when no OpenAI
 *          client is configured or the request fails
 */
async createCompletionStreaming(params: any): Promise<any | null> {
  try {
    const resp = await this.openai?.chat.completions.create(params);
    if (!resp) {
      return null;
    }
    const encoder = new TextEncoder(); // hoisted: one encoder for all chunks
    return new ReadableStream({
      async start(controller: any) {
        // BUG FIX: the previous `if (resp.choices)` guard was never true for
        // a streaming response (the SDK Stream has no `choices` property),
        // so the adapted stream always closed empty. Iterate the response
        // directly instead.
        for await (const chunk of resp as any) {
          const delta = chunk?.choices?.[0]?.delta?.content;
          if (delta) {
            controller.enqueue(encoder.encode(delta));
          }
        }
        controller.close();
      }
    });
  } catch (error) {
    console.error("Error in createCompletionStreaming:", error);
    return null;
  }
}
async chatCompletionNonStreaming(messages: ConversationMessage[]): Promise<ChatCompletion | string> {
const fullConversation = this.mergeWithHistory(messages);

async chatCompletionNonStreaming(content: string): Promise<ChatCompletion | string> {
if (this.openai) {
const params = this.completionParams(this.model, content);
const params = {
model: this.model,
messages: fullConversation,
stream: false
} as OpenAi.Chat.Completions.ChatCompletionCreateParamsNonStreaming;
return await this.openai.chat.completions.create(params) as ChatCompletion;
} else if (this.gemini) {
try {
const model: GenerativeModel = this.gemini.getGenerativeModel({ model: this.model });

const geminiHistory = this.convertToGeminiHistory(fullConversation.slice(0, -1));
const lastMessage = fullConversation[fullConversation.length - 1];

const chat = model.startChat({
history: geminiHistory
});

const result = await chat.sendMessage([{ text: lastMessage.content }]);
return result.response.text();
} catch (e: any) {
console.error(`[GEMINI] Non-streaming error:`, e.message);
throw e;
}
}
if (this.gemini) {
const model: GenerativeModel = this.gemini.getGenerativeModel({ model: this.model });
const chat = model.startChat({ history: [] });
const result = await chat.sendMessage([{ text: content }]);
return result.response.text();
}
throw new Error("Not implemented for this provider for non-streaming or client not initialized.");
throw new Error("Provider not supported");
}

async createCompletionNonStreaming(params: OpenAi.Chat.Completions.ChatCompletionCreateParamsNonStreaming): Promise<OpenAi.Chat.Completions.ChatCompletion> {
let response = await this.openai.chat.completions.create(params);
if (typeof response === 'string' || response instanceof String) {
let responseStr = response as unknown as string;
response = JSON.parse(responseStr);
}
return response;
/**
 * Reset the in-memory conversation history to an empty state.
 * Does not touch any history file on disk.
 */
clearHistory(): void {
  console.log(`[CONVERSATION] History cleared`);
  this.conversationHistory = [];
}

/**
 * Append one message to the in-memory conversation history.
 *
 * @param role - speaker role (e.g. 'user' or 'assistant')
 * @param content - message text
 */
addToHistory(role: string, content: string): void {
  const message: ConversationMessage = { role, content };
  this.conversationHistory.push(message);
  console.log(`[CONVERSATION] Added ${role} message to history`);
}

/**
 * Return a shallow copy of the conversation history, so callers cannot
 * mutate the client's internal state through the returned array.
 */
getHistory(): ConversationMessage[] {
  return this.conversationHistory.slice();
}

// Legacy methods for backward compatibility
async chatCompletionStreamingLegacy(content: string): Promise<any> {
return this.chatCompletionStreaming([{ role: 'user', content }]);
}

async chatCompletionNonStreamingLegacy(content: string): Promise<ChatCompletion | string> {
return this.chatCompletionNonStreaming([{ role: 'user', content }]);
}
}
Loading