diff --git a/core/http/static/chat.js b/core/http/static/chat.js
index 62270ddb6682..0759b90d7fd0 100644
--- a/core/http/static/chat.js
+++ b/core/http/static/chat.js
@@ -27,15 +27,444 @@ SOFTWARE.
*/
-// Global variable to store the current AbortController
-let currentAbortController = null;
+// Track requests per chat ID to support parallel chatting
+let activeRequests = new Map(); // chatId -> { controller, reader, startTime, tokensReceived, interval, maxTokensPerSecond }
+
+// Global variables for UI (stop button, etc.)
+let currentAbortController = null; // For stop button - tracks the active chat's request
let currentReader = null;
-let requestStartTime = null;
-let tokensReceived = 0;
let tokensPerSecondInterval = null;
+let tokensPerSecondIntervalChatId = null; // Track which chat the interval is for
let lastTokensPerSecond = null; // Store the last calculated rate
-function toggleLoader(show) {
+// Storage key for chats
+const CHATS_STORAGE_KEY = 'localai_chats_data';
+const SYSTEM_PROMPT_STORAGE_KEY = 'system_prompt'; // Old key for migration
+
+// Debounce timer for auto-save
+let saveDebounceTimer = null;
+const SAVE_DEBOUNCE_MS = 500;
+
+// Save chats to localStorage with error handling
+function saveChatsToStorage() {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return false;
+ }
+
+ try {
+ const chatStore = Alpine.store("chat");
+ const data = {
+ chats: chatStore.chats.map(chat => ({
+ id: chat.id,
+ name: chat.name,
+ model: chat.model,
+ history: chat.history,
+ systemPrompt: chat.systemPrompt,
+ mcpMode: chat.mcpMode,
+ tokenUsage: chat.tokenUsage,
+ contextSize: chat.contextSize,
+ createdAt: chat.createdAt,
+ updatedAt: chat.updatedAt
+ })),
+ activeChatId: chatStore.activeChatId,
+ lastSaved: Date.now()
+ };
+
+ const jsonData = JSON.stringify(data);
+ localStorage.setItem(CHATS_STORAGE_KEY, jsonData);
+ return true;
+ } catch (error) {
+ // Handle quota exceeded or other storage errors
+ if (error.name === 'QuotaExceededError' || error.code === 22) {
+ console.warn('localStorage quota exceeded. Consider cleaning up old chats.');
+ // Try to save without history (last resort)
+ try {
+ const chatStore = Alpine.store("chat");
+ const data = {
+ chats: chatStore.chats.map(chat => ({
+ id: chat.id,
+ name: chat.name,
+ model: chat.model,
+ history: [], // Clear history to save space
+ systemPrompt: chat.systemPrompt,
+ mcpMode: chat.mcpMode,
+ tokenUsage: chat.tokenUsage,
+ contextSize: chat.contextSize,
+ createdAt: chat.createdAt,
+ updatedAt: chat.updatedAt
+ })),
+ activeChatId: chatStore.activeChatId,
+ lastSaved: Date.now()
+ };
+ localStorage.setItem(CHATS_STORAGE_KEY, JSON.stringify(data));
+ return true;
+ } catch (e2) {
+ console.error('Failed to save chats even without history:', e2);
+ return false;
+ }
+ } else {
+ console.error('Error saving chats to localStorage:', error);
+ return false;
+ }
+ }
+}
+
+// Load chats from localStorage with migration support
+function loadChatsFromStorage() {
+ try {
+ const stored = localStorage.getItem(CHATS_STORAGE_KEY);
+ if (stored) {
+ const data = JSON.parse(stored);
+
+ // Validate structure
+ if (data && Array.isArray(data.chats)) {
+ return {
+ chats: data.chats,
+ activeChatId: data.activeChatId || null,
+ lastSaved: data.lastSaved || null
+ };
+ }
+ }
+
+ // Migration: Check for old format
+ const oldSystemPrompt = localStorage.getItem(SYSTEM_PROMPT_STORAGE_KEY);
+ if (oldSystemPrompt) {
+ // Migrate old single-chat format to new multi-chat format
+ const chatStore = Alpine.store("chat");
+ if (chatStore) {
+ const migratedChat = chatStore.createChat(
+ document.getElementById("chat-model")?.value || "",
+ oldSystemPrompt,
+ false
+ );
+ // Try to preserve any existing history if available
+ if (chatStore.activeChat()) {
+ chatStore.activeChat().name = "Migrated Chat";
+ }
+ // Save migrated data
+ saveChatsToStorage();
+ // Remove old key
+ localStorage.removeItem(SYSTEM_PROMPT_STORAGE_KEY);
+ return {
+ chats: chatStore.chats,
+ activeChatId: chatStore.activeChatId,
+ lastSaved: Date.now()
+ };
+ }
+ }
+
+ return null;
+ } catch (error) {
+ console.error('Error loading chats from localStorage:', error);
+ // Try to recover by clearing corrupted data
+ try {
+ localStorage.removeItem(CHATS_STORAGE_KEY);
+ } catch (e) {
+ console.error('Failed to clear corrupted data:', e);
+ }
+ return null;
+ }
+}
+
+// Auto-save with debouncing
+function autoSaveChats() {
+ if (saveDebounceTimer) {
+ clearTimeout(saveDebounceTimer);
+ }
+ saveDebounceTimer = setTimeout(() => {
+ saveChatsToStorage();
+ }, SAVE_DEBOUNCE_MS);
+}
+
+// Function to check if a chat has an active request (for UI indicators)
+function isChatRequestActive(chatId) {
+ if (!chatId || !activeRequests) {
+ return false;
+ }
+ const request = activeRequests.get(chatId);
+ return request && (request.controller || request.reader);
+}
+
+// Helper function to update reactive tracking for UI indicators
+function updateRequestTracking(chatId, isActive) {
+ const chatStore = Alpine.store("chat");
+ if (chatStore && typeof chatStore.updateActiveRequestTracking === 'function') {
+ chatStore.updateActiveRequestTracking(chatId, isActive);
+ }
+}
+
+// Make functions available globally
+window.autoSaveChats = autoSaveChats;
+window.createNewChat = createNewChat;
+window.switchChat = switchChat;
+window.deleteChat = deleteChat;
+window.bulkDeleteChats = bulkDeleteChats;
+window.updateChatName = updateChatName;
+window.updateUIForActiveChat = updateUIForActiveChat;
+window.isChatRequestActive = isChatRequestActive;
+
+// Create a new chat
+function createNewChat(model, systemPrompt, mcpMode) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return null;
+ }
+
+ const chatStore = Alpine.store("chat");
+ const chat = chatStore.createChat(model, systemPrompt, mcpMode);
+
+ // Save to storage
+ saveChatsToStorage();
+
+ // Update UI to reflect new active chat
+ updateUIForActiveChat();
+
+ return chat;
+}
+
+// Switch to a different chat
+function switchChat(chatId) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return false;
+ }
+
+ const chatStore = Alpine.store("chat");
+ const oldActiveChat = chatStore.activeChat();
+
+ if (chatStore.switchChat(chatId)) {
+ // CRITICAL: Stop interval FIRST before any other operations
+ // This prevents the interval from updating with wrong chat's data
+ if (tokensPerSecondInterval) {
+ clearInterval(tokensPerSecondInterval);
+ tokensPerSecondInterval = null;
+ }
+
+ // Immediately clear the display to prevent showing stale data
+ const tokensPerSecondDisplay = document.getElementById('tokens-per-second');
+ if (tokensPerSecondDisplay) {
+ tokensPerSecondDisplay.textContent = '-';
+ }
+
+ // Save current state before switching
+ saveChatsToStorage();
+
+ // Hide badge when switching chats - will be shown if new chat has completed request
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (maxBadge) {
+ maxBadge.style.display = 'none';
+ }
+
+ // Update global request tracking for stop button (only if new chat has active request)
+ const newActiveChat = chatStore.activeChat();
+ const newRequest = activeRequests.get(newActiveChat?.id);
+ if (newRequest) {
+ currentAbortController = newRequest.controller;
+ currentReader = newRequest.reader;
+ // Update loader state if new chat has active request
+ const hasActiveRequest = newRequest.controller || newRequest.reader;
+ if (hasActiveRequest) {
+ toggleLoader(true, newActiveChat.id);
+ // Wait a bit to ensure switch is complete and interval is stopped
+ setTimeout(() => {
+ // Double-check we're still on the same chat and interval is stopped
+ const currentActiveChat = chatStore.activeChat();
+ if (currentActiveChat && currentActiveChat.id === newActiveChat.id) {
+ // Make absolutely sure interval is stopped
+ if (tokensPerSecondInterval) {
+ clearInterval(tokensPerSecondInterval);
+ tokensPerSecondInterval = null;
+ tokensPerSecondIntervalChatId = null;
+ }
+ // Update display for the new active chat
+ updateTokensPerSecond(newActiveChat.id);
+ // Restart interval to pick up the new active chat
+ startTokensPerSecondInterval();
+ }
+ }, 100);
+ } else {
+ toggleLoader(false, newActiveChat.id);
+ }
+ } else {
+ // No active request for new chat, clear global references
+ currentAbortController = null;
+ currentReader = null;
+ toggleLoader(false, newActiveChat?.id);
+ // Display is already cleared above
+
+ // Check if this chat has a completed request with max tokens/s to show
+ // Note: We only show badge for completed requests, not active ones
+ // The badge will be shown when the request ends, not when switching to a chat
+ }
+
+ // Update UI to reflect new active chat
+ updateUIForActiveChat();
+
+ return true;
+ }
+ return false;
+}
+
+// Delete a chat
+function deleteChat(chatId) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return false;
+ }
+
+ const chatStore = Alpine.store("chat");
+
+ // Prevent deleting the last chat
+ if (chatStore.chats.length <= 1) {
+ alert('Cannot delete the last chat. Please create a new chat first.');
+ return false;
+ }
+
+ if (chatStore.deleteChat(chatId)) {
+ // Ensure at least one chat exists after deletion
+ if (chatStore.chats.length === 0) {
+ const currentModel = document.getElementById("chat-model")?.value || "";
+ chatStore.createChat(currentModel, "", false);
+ }
+
+ saveChatsToStorage();
+ updateUIForActiveChat();
+ return true;
+ }
+ return false;
+}
+
+// Bulk delete chats
+function bulkDeleteChats(options) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return 0;
+ }
+
+ const chatStore = Alpine.store("chat");
+ let deletedCount = 0;
+ const now = Date.now();
+
+ if (options.deleteAll) {
+ // Delete all chats except active one, or create new if deleting all
+ const activeId = chatStore.activeChatId;
+ chatStore.chats = chatStore.chats.filter(chat => {
+ if (chat.id === activeId && chatStore.chats.length > 1) {
+ return true; // Keep active chat if there are others
+ }
+ deletedCount++;
+ return false;
+ });
+
+ // If all deleted, create a new chat
+ if (chatStore.chats.length === 0) {
+ chatStore.createChat();
+ } else if (!chatStore.chats.find(c => c.id === activeId)) {
+ // Active chat was deleted, switch to first available
+ if (chatStore.chats.length > 0) {
+ chatStore.activeChatId = chatStore.chats[0].id;
+ }
+ }
+ } else if (options.olderThanDays) {
+ const cutoffTime = now - (options.olderThanDays * 24 * 60 * 60 * 1000);
+ const activeId = chatStore.activeChatId;
+
+ chatStore.chats = chatStore.chats.filter(chat => {
+ if (chat.id === activeId) {
+ return true; // Never delete active chat
+ }
+ if (chat.updatedAt < cutoffTime) {
+ deletedCount++;
+ return false;
+ }
+ return true;
+ });
+
+ // Ensure at least one chat exists
+ if (chatStore.chats.length === 0) {
+ const currentModel = document.getElementById("chat-model")?.value || "";
+ chatStore.createChat(currentModel, "", false);
+ }
+ }
+
+ if (deletedCount > 0) {
+ saveChatsToStorage();
+ updateUIForActiveChat();
+ }
+
+ return deletedCount;
+}
+
+// Update UI elements to reflect active chat
+function updateUIForActiveChat() {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return;
+ }
+
+ const chatStore = Alpine.store("chat");
+
+ // Ensure at least one chat exists
+ if (!chatStore.chats || chatStore.chats.length === 0) {
+ const currentModel = document.getElementById("chat-model")?.value || "";
+ chatStore.createChat(currentModel, "", false);
+ }
+
+ const activeChat = chatStore.activeChat();
+
+ if (!activeChat) {
+ // No active chat, set first one as active
+ if (chatStore.chats.length > 0) {
+ chatStore.activeChatId = chatStore.chats[0].id;
+ } else {
+ // Still no chats, create one
+ const currentModel = document.getElementById("chat-model")?.value || "";
+ chatStore.createChat(currentModel, "", false);
+ }
+ return;
+ }
+
+ // Update system prompt input
+ const systemPromptInput = document.getElementById("systemPrompt");
+ if (systemPromptInput) {
+ systemPromptInput.value = activeChat.systemPrompt || "";
+ }
+
+ // Update MCP toggle
+ const mcpToggle = document.getElementById("mcp-toggle");
+ if (mcpToggle) {
+ mcpToggle.checked = activeChat.mcpMode || false;
+ }
+
+ // Update model selector (if needed)
+ const modelSelector = document.getElementById("modelSelector");
+ if (modelSelector && activeChat.model) {
+ // Find and select the option matching the active chat's model
+ for (let option of modelSelector.options) {
+ if (option.value === `chat/${activeChat.model}` || option.text === activeChat.model) {
+ option.selected = true;
+ break;
+ }
+ }
+ }
+
+ // Update chat model hidden input
+ const chatModelInput = document.getElementById("chat-model");
+ if (chatModelInput) {
+ chatModelInput.value = activeChat.model || "";
+ }
+}
+
+// Update chat name
+function updateChatName(chatId, name) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ return false;
+ }
+
+ const chatStore = Alpine.store("chat");
+ if (chatStore.updateChatName(chatId, name)) {
+ autoSaveChats();
+ return true;
+ }
+ return false;
+}
+
+function toggleLoader(show, chatId = null) {
const sendButton = document.getElementById('send-button');
const stopButton = document.getElementById('stop-button');
const headerLoadingIndicator = document.getElementById('header-loading-indicator');
@@ -45,49 +474,256 @@ function toggleLoader(show) {
sendButton.style.display = 'none';
stopButton.style.display = 'block';
if (headerLoadingIndicator) headerLoadingIndicator.style.display = 'block';
- // Reset token tracking
- requestStartTime = Date.now();
- tokensReceived = 0;
- // Start updating tokens/second display
- if (tokensPerSecondDisplay) {
+ // Start updating tokens/second display only if this is for the active chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+
+ // Always stop any existing interval first
+ if (tokensPerSecondInterval) {
+ clearInterval(tokensPerSecondInterval);
+ tokensPerSecondInterval = null;
+ }
+
+ // Use provided chatId or get from active chat
+ const targetChatId = chatId || (activeChat ? activeChat.id : null);
+
+ if (tokensPerSecondDisplay && targetChatId && activeChat && activeChat.id === targetChatId) {
+ tokensPerSecondDisplay.textContent = '-';
+ // Hide max badge when starting new request
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (maxBadge) {
+ maxBadge.style.display = 'none';
+ }
+ // Don't start interval here - it will be started when the request is created
+ // Just update once to show initial state
+ updateTokensPerSecond(targetChatId);
+ } else if (tokensPerSecondDisplay) {
+ // Not the active chat, hide or show dash
tokensPerSecondDisplay.textContent = '-';
- updateTokensPerSecond();
- tokensPerSecondInterval = setInterval(updateTokensPerSecond, 500); // Update every 500ms
}
} else {
sendButton.style.display = 'block';
stopButton.style.display = 'none';
if (headerLoadingIndicator) headerLoadingIndicator.style.display = 'none';
- // Stop updating but keep the last value visible
- if (tokensPerSecondInterval) {
- clearInterval(tokensPerSecondInterval);
- tokensPerSecondInterval = null;
+ // Stop updating but keep the last value visible only if this was the active chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (chatId && activeChat && activeChat.id === chatId) {
+ // Stop the interval since this request is done
+ stopTokensPerSecondInterval();
+ // Keep the last calculated rate visible
+ if (tokensPerSecondDisplay && lastTokensPerSecond !== null) {
+ tokensPerSecondDisplay.textContent = lastTokensPerSecond;
+ }
+ // Check if there are other active requests for the active chat and restart interval if needed
+ const activeRequest = activeRequests.get(activeChat.id);
+ if (activeRequest && (activeRequest.controller || activeRequest.reader)) {
+ // Restart interval for the active chat
+ startTokensPerSecondInterval();
+ }
+ } else if (tokensPerSecondDisplay) {
+ // Not the active chat, just show dash
+ tokensPerSecondDisplay.textContent = '-';
}
- // Keep the last calculated rate visible
- if (tokensPerSecondDisplay && lastTokensPerSecond !== null) {
- tokensPerSecondDisplay.textContent = lastTokensPerSecond;
+ // Only clear global references if this was the active chat
+ if (chatId && activeChat && activeChat.id === chatId) {
+ currentAbortController = null;
+ currentReader = null;
+
+ // Show the max tokens/s badge when request ends
+ const request = activeRequests.get(chatId);
+ if (request && request.maxTokensPerSecond > 0) {
+ updateMaxTokensPerSecondBadge(chatId, request.maxTokensPerSecond);
+ }
}
- currentAbortController = null;
- currentReader = null;
- requestStartTime = null;
- tokensReceived = 0;
}
}
-function updateTokensPerSecond() {
+// Start a single global interval that updates tokens/second for the active chat
+function startTokensPerSecondInterval() {
+ // Stop any existing interval first
+ stopTokensPerSecondInterval();
+
+ // Get the current active chat ID to track
+ const chatStore = Alpine.store("chat");
+ if (!chatStore) {
+ return;
+ }
+
+ const activeChat = chatStore.activeChat();
+ if (!activeChat) {
+ return;
+ }
+
+ // Check if active chat has an active request
+ // We can start the interval if we have at least a controller (reader will be set when streaming starts)
+ const request = activeRequests.get(activeChat.id);
+ if (!request) {
+ // No active request for this chat
+ return;
+ }
+
+ if (!request.controller) {
+ // No controller yet, don't start interval
+ return;
+ }
+
+ // Store which chat this interval is for
+ tokensPerSecondIntervalChatId = activeChat.id;
+
+ // Start a single interval that always checks the current active chat
+ // Use a function that always gets fresh state, no closures
+ tokensPerSecondInterval = setInterval(() => {
+ // Always get fresh references - no closures
+ const currentChatStore = Alpine.store("chat");
+ if (!currentChatStore) {
+ stopTokensPerSecondInterval();
+ return;
+ }
+
+ const currentActiveChat = currentChatStore.activeChat();
+ const tokensPerSecondDisplay = document.getElementById('tokens-per-second');
+
+ if (!tokensPerSecondDisplay) {
+ stopTokensPerSecondInterval();
+ return;
+ }
+
+ // CRITICAL: Check if the active chat has changed
+ if (!currentActiveChat || currentActiveChat.id !== tokensPerSecondIntervalChatId) {
+ // Active chat changed, stop this interval immediately and hide badge
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (maxBadge) {
+ maxBadge.style.display = 'none';
+ }
+ stopTokensPerSecondInterval();
+ return;
+ }
+
+ // Check if active chat still has an active request
+ const currentRequest = activeRequests.get(currentActiveChat.id);
+ if (!currentRequest) {
+ // No active request for this chat anymore - hide badge
+ tokensPerSecondDisplay.textContent = '-';
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (maxBadge) {
+ maxBadge.style.display = 'none';
+ }
+ stopTokensPerSecondInterval();
+ return;
+ }
+
+ // If controller is gone, request ended - show max rate badge only for this chat
+ if (!currentRequest.controller) {
+ tokensPerSecondDisplay.textContent = '-';
+ if (currentRequest.maxTokensPerSecond > 0) {
+ // Only show badge if this is still the active chat
+ updateMaxTokensPerSecondBadge(currentActiveChat.id, currentRequest.maxTokensPerSecond);
+ } else {
+ // Hide badge if no max value
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (maxBadge) {
+ maxBadge.style.display = 'none';
+ }
+ }
+ stopTokensPerSecondInterval();
+ return;
+ }
+
+ // Update for the current active chat only
+ updateTokensPerSecond(currentActiveChat.id);
+ }, 250); // Update more frequently for better responsiveness
+}
+
+// Stop the tokens/second interval
+function stopTokensPerSecondInterval() {
+ if (tokensPerSecondInterval) {
+ clearInterval(tokensPerSecondInterval);
+ tokensPerSecondInterval = null;
+ }
+ tokensPerSecondIntervalChatId = null; // Clear tracked chat ID
const tokensPerSecondDisplay = document.getElementById('tokens-per-second');
- if (!tokensPerSecondDisplay || !requestStartTime) return;
-
- const elapsedSeconds = (Date.now() - requestStartTime) / 1000;
- if (elapsedSeconds > 0 && tokensReceived > 0) {
- const rate = tokensReceived / elapsedSeconds;
- const formattedRate = `${rate.toFixed(1)} tokens/s`;
- tokensPerSecondDisplay.textContent = formattedRate;
- lastTokensPerSecond = formattedRate; // Store the last calculated rate
- } else if (elapsedSeconds > 0) {
+ if (tokensPerSecondDisplay) {
tokensPerSecondDisplay.textContent = '-';
}
+ // Clear the last rate so it doesn't get reused
+ lastTokensPerSecond = null;
+}
+
+function updateTokensPerSecond(chatId) {
+ const tokensPerSecondDisplay = document.getElementById('tokens-per-second');
+ if (!tokensPerSecondDisplay || !chatId) {
+ return;
+ }
+
+ // Get the request info for this chat
+ const request = activeRequests.get(chatId);
+ if (!request || !request.startTime) {
+ tokensPerSecondDisplay.textContent = '-';
+ return;
+ }
+
+ // Verify the request is still active (controller is cleared when request ends)
+ if (!request.controller) {
+ tokensPerSecondDisplay.textContent = '-';
+ return;
+ }
+
+ // Check if this is still the active chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore ? chatStore.activeChat() : null;
+ if (!activeChat || activeChat.id !== chatId) {
+ // Not the active chat anymore
+ tokensPerSecondDisplay.textContent = '-';
+ return;
+ }
+
+ const elapsedSeconds = (Date.now() - request.startTime) / 1000;
+ // Show rate if we have tokens, otherwise show waiting indicator
+ if (elapsedSeconds > 0) {
+ if (request.tokensReceived > 0) {
+ const rate = request.tokensReceived / elapsedSeconds;
+ // Update max rate if this is higher
+ if (rate > (request.maxTokensPerSecond || 0)) {
+ request.maxTokensPerSecond = rate;
+ }
+ const formattedRate = `${rate.toFixed(1)} tokens/s`;
+ tokensPerSecondDisplay.textContent = formattedRate;
+ lastTokensPerSecond = formattedRate; // Store the last calculated rate
+
+ // Update the max badge if it exists (only show during active request if user wants, or we can show it at the end)
+ } else {
+ // Request is active but no tokens yet - show waiting
+ tokensPerSecondDisplay.textContent = '0.0 tokens/s';
+ }
+ } else {
+ // Just started
+ tokensPerSecondDisplay.textContent = '-';
+ }
+}
+
+// Update the max tokens/s badge display
+function updateMaxTokensPerSecondBadge(chatId, maxRate) {
+ const maxBadge = document.getElementById('max-tokens-per-second-badge');
+ if (!maxBadge) return;
+
+ // Check if this is still the active chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore ? chatStore.activeChat() : null;
+ if (!activeChat || activeChat.id !== chatId) {
+ // Not the active chat, hide badge
+ maxBadge.style.display = 'none';
+ return;
+ }
+
+ // Only show badge if we have a valid max rate
+ if (maxRate > 0) {
+ maxBadge.textContent = `Peak: ${maxRate.toFixed(1)} tokens/s`;
+ maxBadge.style.display = 'inline-flex';
+ } else {
+ maxBadge.style.display = 'none';
+ }
}
function scrollThinkingBoxToBottom() {
@@ -108,6 +744,27 @@ function scrollThinkingBoxToBottom() {
window.scrollThinkingBoxToBottom = scrollThinkingBoxToBottom;
function stopRequest() {
+ // Stop the request for the currently active chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (!activeChat) return;
+
+ const request = activeRequests.get(activeChat.id);
+ if (request) {
+ if (request.controller) {
+ request.controller.abort();
+ }
+ if (request.reader) {
+ request.reader.cancel();
+ }
+ if (request.interval) {
+ clearInterval(request.interval);
+ }
+ activeRequests.delete(activeChat.id);
+ updateRequestTracking(activeChat.id, false);
+ }
+
+ // Also clear global references
if (currentAbortController) {
currentAbortController.abort();
currentAbortController = null;
@@ -116,10 +773,13 @@ function stopRequest() {
currentReader.cancel();
currentReader = null;
}
- toggleLoader(false);
- Alpine.store("chat").add(
+ toggleLoader(false, activeChat.id);
+ chatStore.add(
"assistant",
`Request cancelled by user`,
+ null,
+ null,
+ activeChat.id
);
}
@@ -151,7 +811,13 @@ function processThinkingTags(content) {
function submitSystemPrompt(event) {
event.preventDefault();
- localStorage.setItem("system_prompt", document.getElementById("systemPrompt").value);
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (activeChat) {
+ activeChat.systemPrompt = document.getElementById("systemPrompt").value;
+ activeChat.updatedAt = Date.now();
+ autoSaveChats();
+ }
document.getElementById("systemPrompt").blur();
}
@@ -324,16 +990,21 @@ function submitPrompt(event) {
const inputValue = input.value;
if (!inputValue.trim()) return; // Don't send empty messages
- // If already processing, abort the current request and send the new one
- if (currentAbortController || currentReader) {
- // Abort current request
- stopRequest();
- // Small delay to ensure cleanup completes
- setTimeout(() => {
- // Continue with new request
- processAndSendMessage(inputValue);
- }, 100);
- return;
+ // Check if there's an active request for the current chat
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (activeChat) {
+ const activeRequest = activeRequests.get(activeChat.id);
+ if (activeRequest && (activeRequest.controller || activeRequest.reader)) {
+ // Abort current request for this chat
+ stopRequest();
+ // Small delay to ensure cleanup completes
+ setTimeout(() => {
+ // Continue with new request
+ processAndSendMessage(inputValue);
+ }, 100);
+ return;
+ }
}
processAndSendMessage(inputValue);
@@ -363,14 +1034,16 @@ function processAndSendMessage(inputValue) {
Alpine.store("chat").add("user", displayContent, images, audios);
// Update the last message in the store with the full content
- const history = Alpine.store("chat").history;
- if (history.length > 0) {
- history[history.length - 1].content = fullInput;
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (activeChat && activeChat.history.length > 0) {
+ activeChat.history[activeChat.history.length - 1].content = fullInput;
+ activeChat.updatedAt = Date.now();
}
const input = document.getElementById("input");
if (input) input.value = "";
- const systemPrompt = localStorage.getItem("system_prompt");
+ const systemPrompt = activeChat?.systemPrompt || "";
Alpine.nextTick(() => {
const chatContainer = document.getElementById('chat');
if (chatContainer) {
@@ -451,17 +1124,27 @@ function readInputAudioFile(file) {
}
async function promptGPT(systemPrompt, input) {
- const model = document.getElementById("chat-model").value;
- const mcpMode = Alpine.store("chat").mcpMode;
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+ if (!activeChat) {
+ console.error('No active chat');
+ return;
+ }
+
+ const model = activeChat.model || document.getElementById("chat-model").value;
+ const mcpMode = activeChat.mcpMode || false;
// Reset current request usage tracking for new request
- if (Alpine.store("chat")) {
- Alpine.store("chat").tokenUsage.currentRequest = null;
+ if (activeChat.tokenUsage) {
+ activeChat.tokenUsage.currentRequest = null;
}
- toggleLoader(true);
+ // Store the chat ID for this request so we can track it even if user switches chats
+ const chatId = activeChat.id;
+
+ toggleLoader(true, chatId);
- messages = Alpine.store("chat").messages();
+ messages = chatStore.messages();
// if systemPrompt isn't empty, push it at the start of messages
if (systemPrompt) {
@@ -525,11 +1208,46 @@ async function promptGPT(systemPrompt, input) {
// Add stream parameter for both regular chat and MCP (MCP now supports SSE streaming)
requestBody.stream = true;
+ // Add generation parameters if they are set (null means use default)
+ if (activeChat.temperature !== null && activeChat.temperature !== undefined) {
+ requestBody.temperature = activeChat.temperature;
+ }
+ if (activeChat.topP !== null && activeChat.topP !== undefined) {
+ requestBody.top_p = activeChat.topP;
+ }
+ if (activeChat.topK !== null && activeChat.topK !== undefined) {
+ requestBody.top_k = activeChat.topK;
+ }
+
let response;
try {
// Create AbortController for timeout handling and stop button
const controller = new AbortController();
- currentAbortController = controller; // Store globally so stop button can abort it
+ // Store per-chat so switching chats doesn't abort this request
+ const requestStartTime = Date.now();
+ activeRequests.set(chatId, {
+ controller: controller,
+ reader: null,
+ startTime: requestStartTime,
+ tokensReceived: 0,
+ interval: null,
+ maxTokensPerSecond: 0
+ });
+
+ // Update reactive tracking for UI indicators
+ updateRequestTracking(chatId, true);
+ // Also store globally for stop button (only for active chat)
+ currentAbortController = controller;
+
+ // Start tokens/second interval now that the request is created
+ // Try to start immediately, and also schedule a retry in case Alpine isn't ready
+ startTokensPerSecondInterval();
+ setTimeout(() => {
+ // Retry in case the first attempt failed due to timing
+ if (!tokensPerSecondInterval) {
+ startTokensPerSecondInterval();
+ }
+ }, 200);
const timeoutId = setTimeout(() => controller.abort(), mcpMode ? 300000 : 30000); // 5 minutes for MCP, 30 seconds for regular
response = await fetch(endpoint, {
@@ -553,29 +1271,48 @@ async function promptGPT(systemPrompt, input) {
return;
} else {
// Timeout error (controller was aborted by timeout, not user)
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`Request timeout: MCP processing is taking longer than expected. Please try again.`,
+ null,
+ null,
+ chatId
);
}
} else {
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`Network Error: ${error.message}`,
+ null,
+ null,
+ chatId
);
}
- toggleLoader(false);
- currentAbortController = null;
+ toggleLoader(false, chatId);
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
+ const activeChat = chatStore.activeChat();
+ if (activeChat && activeChat.id === chatId) {
+ currentAbortController = null;
+ }
return;
}
if (!response.ok) {
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`Error: POST ${endpoint} ${response.status}`,
+ null,
+ null,
+ chatId
);
- toggleLoader(false);
- currentAbortController = null;
+ toggleLoader(false, chatId);
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
+ const activeChat = chatStore.activeChat();
+ if (activeChat && activeChat.id === chatId) {
+ currentAbortController = null;
+ }
return;
}
@@ -587,15 +1324,25 @@ async function promptGPT(systemPrompt, input) {
.getReader();
if (!reader) {
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`Error: Failed to decode MCP API response`,
+ null,
+ null,
+ chatId
);
- toggleLoader(false);
+ toggleLoader(false, chatId);
+ activeRequests.delete(chatId);
return;
}
- // Store reader globally so stop button can cancel it
+ // Store reader per-chat and globally
+ const mcpRequest = activeRequests.get(chatId);
+ if (mcpRequest) {
+ mcpRequest.reader = reader;
+ // Ensure tracking is updated when reader is set
+ updateRequestTracking(chatId, true);
+ }
currentReader = reader;
let buffer = "";
@@ -613,6 +1360,14 @@ async function promptGPT(systemPrompt, input) {
const { value, done } = await reader.read();
if (done) break;
+ // Check if chat still exists and is still the target chat (user might have switched)
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) {
+ // Chat was deleted, abort
+ break;
+ }
+ const targetHistory = currentChat.history;
+
buffer += value;
let lines = buffer.split("\n");
@@ -632,10 +1387,9 @@ async function promptGPT(systemPrompt, input) {
switch (eventData.type) {
case "reasoning":
if (eventData.content) {
- const chatStore = Alpine.store("chat");
// Insert reasoning before assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
+ targetHistory.splice(lastAssistantMessageIndex, 0, {
role: "reasoning",
content: eventData.content,
html: DOMPurify.sanitize(marked.parse(eventData.content)),
@@ -656,7 +1410,7 @@ async function promptGPT(systemPrompt, input) {
}, 100);
} else {
// No assistant message yet, just add normally
- chatStore.add("reasoning", eventData.content);
+ chatStore.add("reasoning", eventData.content, null, null, chatId);
}
}
break;
@@ -669,7 +1423,7 @@ async function promptGPT(systemPrompt, input) {
arguments: eventData.arguments || {},
reasoning: eventData.reasoning || ""
};
- Alpine.store("chat").add("tool_call", JSON.stringify(toolCallData, null, 2));
+ chatStore.add("tool_call", JSON.stringify(toolCallData, null, 2), null, null, chatId);
// Scroll smoothly after adding tool call
setTimeout(() => {
const chatContainer = document.getElementById('chat');
@@ -690,7 +1444,7 @@ async function promptGPT(systemPrompt, input) {
name: eventData.name,
result: eventData.result || ""
};
- Alpine.store("chat").add("tool_result", JSON.stringify(toolResultData, null, 2));
+ chatStore.add("tool_result", JSON.stringify(toolResultData, null, 2), null, null, chatId);
// Scroll smoothly after adding tool result
setTimeout(() => {
const chatContainer = document.getElementById('chat');
@@ -714,9 +1468,13 @@ async function promptGPT(systemPrompt, input) {
assistantContent += eventData.content;
const contentChunk = eventData.content;
- // Count tokens for rate calculation
- tokensReceived += Math.ceil(contentChunk.length / 4);
- updateTokensPerSecond();
+ // Count tokens for rate calculation (per chat)
+ const request = activeRequests.get(chatId);
+ if (request) {
+ request.tokensReceived += Math.ceil(contentChunk.length / 4);
+ }
+ // Only update display if this is the active chat (interval will handle it)
+ // Don't call updateTokensPerSecond here to avoid unnecessary updates
// Check for thinking tags in the chunk (incremental detection)
if (contentChunk.includes("<thinking>") || contentChunk.includes("<redacted_reasoning>")) {
@@ -733,14 +1491,15 @@ async function promptGPT(systemPrompt, input) {
const thinkingMatch = thinkingContent.match(/<(?:thinking|redacted_reasoning)>(.*?)<\/(?:thinking|redacted_reasoning)>/s);
if (thinkingMatch && thinkingMatch[1]) {
const extractedThinking = thinkingMatch[1];
- const chatStore = Alpine.store("chat");
- const isMCPMode = chatStore.mcpMode || false;
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) break; // Chat was deleted
+ const isMCPMode = currentChat.mcpMode || false;
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
if (lastThinkingMessageIndex === -1) {
// Insert thinking before the last assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
+ if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
// Insert before assistant message
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ targetHistory.splice(lastAssistantMessageIndex, 0, {
role: "thinking",
content: extractedThinking,
html: DOMPurify.sanitize(marked.parse(extractedThinking)),
@@ -752,12 +1511,12 @@ async function promptGPT(systemPrompt, input) {
lastAssistantMessageIndex++; // Adjust index since we inserted
} else {
// No assistant message yet, just add normally
- chatStore.add("thinking", extractedThinking);
- lastThinkingMessageIndex = chatStore.history.length - 1;
+ chatStore.add("thinking", extractedThinking, null, null, chatId);
+ lastThinkingMessageIndex = targetHistory.length - 1;
}
} else {
// Update existing thinking message
- const lastMessage = chatStore.history[lastThinkingMessageIndex];
+ const lastMessage = targetHistory[lastThinkingMessageIndex];
if (lastMessage && lastMessage.role === "thinking") {
lastMessage.content = extractedThinking;
lastMessage.html = DOMPurify.sanitize(marked.parse(extractedThinking));
@@ -783,15 +1542,16 @@ async function promptGPT(systemPrompt, input) {
// Handle content based on thinking state
if (isThinking) {
thinkingContent += contentChunk;
- const chatStore = Alpine.store("chat");
- const isMCPMode = chatStore.mcpMode || false;
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) break; // Chat was deleted
+ const isMCPMode = currentChat.mcpMode || false;
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
// Update the last thinking message or create a new one (incremental)
if (lastThinkingMessageIndex === -1) {
// Insert thinking before the last assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
+ if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
// Insert before assistant message
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ targetHistory.splice(lastAssistantMessageIndex, 0, {
role: "thinking",
content: thinkingContent,
html: DOMPurify.sanitize(marked.parse(thinkingContent)),
@@ -803,12 +1563,12 @@ async function promptGPT(systemPrompt, input) {
lastAssistantMessageIndex++; // Adjust index since we inserted
} else {
// No assistant message yet, just add normally
- chatStore.add("thinking", thinkingContent);
- lastThinkingMessageIndex = chatStore.history.length - 1;
+ chatStore.add("thinking", thinkingContent, null, null, chatId);
+ lastThinkingMessageIndex = targetHistory.length - 1;
}
} else {
// Update existing thinking message
- const lastMessage = chatStore.history[lastThinkingMessageIndex];
+ const lastMessage = targetHistory[lastThinkingMessageIndex];
if (lastMessage && lastMessage.role === "thinking") {
lastMessage.content = thinkingContent;
lastMessage.html = DOMPurify.sanitize(marked.parse(thinkingContent));
@@ -838,9 +1598,12 @@ async function promptGPT(systemPrompt, input) {
break;
case "error":
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`MCP Error: ${eventData.message}`,
+ null,
+ null,
+ chatId
);
break;
}
@@ -859,14 +1622,15 @@ async function promptGPT(systemPrompt, input) {
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
// Update or create assistant message with processed regular content
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) break; // Chat was deleted
if (lastAssistantMessageIndex === -1) {
if (processedRegular && processedRegular.trim()) {
- Alpine.store("chat").add("assistant", processedRegular);
- lastAssistantMessageIndex = Alpine.store("chat").history.length - 1;
+ chatStore.add("assistant", processedRegular, null, null, chatId);
+ lastAssistantMessageIndex = targetHistory.length - 1;
}
} else {
- const chatStore = Alpine.store("chat");
- const lastMessage = chatStore.history[lastAssistantMessageIndex];
+ const lastMessage = targetHistory[lastAssistantMessageIndex];
if (lastMessage && lastMessage.role === "assistant") {
lastMessage.content = (lastMessage.content || "") + (processedRegular || "");
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
@@ -875,12 +1639,11 @@ async function promptGPT(systemPrompt, input) {
// Add any extracted thinking content from the processed buffer BEFORE assistant message
if (processedThinking && processedThinking.trim()) {
- const chatStore = Alpine.store("chat");
- const isMCPMode = chatStore.mcpMode || false;
+ const isMCPMode = currentChat.mcpMode || false;
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
// Insert thinking before assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
+ targetHistory.splice(lastAssistantMessageIndex, 0, {
role: "thinking",
content: processedThinking,
html: DOMPurify.sanitize(marked.parse(processedThinking)),
@@ -891,7 +1654,7 @@ async function promptGPT(systemPrompt, input) {
lastAssistantMessageIndex++; // Adjust index since we inserted
} else {
// No assistant message yet, just add normally
- chatStore.add("thinking", processedThinking);
+ chatStore.add("thinking", processedThinking, null, null, chatId);
}
}
@@ -905,15 +1668,22 @@ async function promptGPT(systemPrompt, input) {
// Process any remaining thinking tags that might be in the buffer
const { regularContent: processedRegular, thinkingContent: processedThinking } = processThinkingTags(regularContent);
- const chatStore = Alpine.store("chat");
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) {
+ // Chat was deleted, cleanup and exit
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
+ return;
+ }
+ const targetHistory = currentChat.history;
// First, add any extracted thinking content BEFORE assistant message
if (processedThinking && processedThinking.trim()) {
- const isMCPMode = chatStore.mcpMode || false;
+ const isMCPMode = currentChat.mcpMode || false;
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
// Insert thinking before assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ if (lastAssistantMessageIndex >= 0 && targetHistory[lastAssistantMessageIndex]?.role === "assistant") {
+ targetHistory.splice(lastAssistantMessageIndex, 0, {
role: "thinking",
content: processedThinking,
html: DOMPurify.sanitize(marked.parse(processedThinking)),
@@ -924,34 +1694,35 @@ async function promptGPT(systemPrompt, input) {
lastAssistantMessageIndex++; // Adjust index since we inserted
} else {
// No assistant message yet, just add normally
- chatStore.add("thinking", processedThinking);
+ chatStore.add("thinking", processedThinking, null, null, chatId);
}
}
// Then update or create assistant message
if (lastAssistantMessageIndex !== -1) {
- const lastMessage = chatStore.history[lastAssistantMessageIndex];
+ const lastMessage = targetHistory[lastAssistantMessageIndex];
if (lastMessage && lastMessage.role === "assistant") {
lastMessage.content = (lastMessage.content || "") + (processedRegular || "");
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
}
} else if (processedRegular && processedRegular.trim()) {
- chatStore.add("assistant", processedRegular);
- lastAssistantMessageIndex = chatStore.history.length - 1;
+ chatStore.add("assistant", processedRegular, null, null, chatId);
+ lastAssistantMessageIndex = targetHistory.length - 1;
}
}
// Final thinking content flush if any data remains (from incremental detection)
- if (thinkingContent.trim() && lastThinkingMessageIndex === -1) {
+ const finalChat = chatStore.getChat(chatId);
+ if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
+ const finalHistory = finalChat.history;
// Extract thinking content if tags are present
const thinkingMatch = thinkingContent.match(/<(?:thinking|redacted_reasoning)>(.*?)<\/(?:thinking|redacted_reasoning)>/s);
if (thinkingMatch && thinkingMatch[1]) {
- const chatStore = Alpine.store("chat");
- const isMCPMode = chatStore.mcpMode || false;
+ const isMCPMode = finalChat.mcpMode || false;
const shouldExpand = !isMCPMode; // Expanded in non-MCP mode, collapsed in MCP mode
// Insert thinking before assistant message if it exists
- if (lastAssistantMessageIndex >= 0 && chatStore.history[lastAssistantMessageIndex]?.role === "assistant") {
- chatStore.history.splice(lastAssistantMessageIndex, 0, {
+ if (lastAssistantMessageIndex >= 0 && finalHistory[lastAssistantMessageIndex]?.role === "assistant") {
+ finalHistory.splice(lastAssistantMessageIndex, 0, {
role: "thinking",
content: thinkingMatch[1],
html: DOMPurify.sanitize(marked.parse(thinkingMatch[1])),
@@ -961,60 +1732,78 @@ async function promptGPT(systemPrompt, input) {
});
} else {
// No assistant message yet, just add normally
- chatStore.add("thinking", thinkingMatch[1]);
+ chatStore.add("thinking", thinkingMatch[1], null, null, chatId);
}
} else {
- Alpine.store("chat").add("thinking", thinkingContent);
+ chatStore.add("thinking", thinkingContent, null, null, chatId);
}
}
// Final pass: process the entire assistantContent to catch any missed thinking tags
// This ensures we don't miss tags that were split across chunks
- if (assistantContent.trim()) {
+ if (finalChat && assistantContent.trim()) {
+ const finalHistory = finalChat.history;
const { regularContent: finalRegular, thinkingContent: finalThinking } = processThinkingTags(assistantContent);
// Update assistant message with final processed content (without thinking tags)
if (finalRegular && finalRegular.trim()) {
if (lastAssistantMessageIndex !== -1) {
- const chatStore = Alpine.store("chat");
- const lastMessage = chatStore.history[lastAssistantMessageIndex];
+ const lastMessage = finalHistory[lastAssistantMessageIndex];
if (lastMessage && lastMessage.role === "assistant") {
lastMessage.content = finalRegular;
lastMessage.html = DOMPurify.sanitize(marked.parse(lastMessage.content));
}
} else {
- Alpine.store("chat").add("assistant", finalRegular);
+ chatStore.add("assistant", finalRegular, null, null, chatId);
}
}
// Add any extracted thinking content (only if not already added)
if (finalThinking && finalThinking.trim()) {
- const hasThinking = Alpine.store("chat").history.some(msg =>
+ const hasThinking = finalHistory.some(msg =>
msg.role === "thinking" && msg.content.trim() === finalThinking.trim()
);
if (!hasThinking) {
- Alpine.store("chat").add("thinking", finalThinking);
+ chatStore.add("thinking", finalThinking, null, null, chatId);
}
}
}
+
+ // Cleanup request tracking
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
// Highlight all code blocks once at the end
hljs.highlightAll();
} catch (error) {
// Don't show error if request was aborted by user
if (error.name !== 'AbortError' || !currentAbortController) {
- Alpine.store("chat").add(
- "assistant",
- `Error: Failed to process MCP stream`,
- );
+ const errorChat = chatStore.getChat(chatId);
+ if (errorChat) {
+ chatStore.add(
+ "assistant",
+ `Error: Failed to process MCP stream`,
+ null,
+ null,
+ chatId
+ );
+ }
}
} finally {
// Perform any cleanup if necessary
if (reader) {
reader.releaseLock();
}
- currentReader = null;
- currentAbortController = null;
+ // Only clear global references if this was the active chat's request
+ const activeChat = chatStore.activeChat();
+ if (activeChat && activeChat.id === chatId) {
+ currentReader = null;
+ currentAbortController = null;
+ toggleLoader(false, chatId);
+ }
+ // Cleanup per-chat tracking
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
}
} else {
// Handle regular streaming response
@@ -1023,27 +1812,51 @@ async function promptGPT(systemPrompt, input) {
.getReader();
if (!reader) {
- Alpine.store("chat").add(
+ chatStore.add(
"assistant",
`Error: Failed to decode API response`,
+ null,
+ null,
+ chatId
);
- toggleLoader(false);
+ toggleLoader(false, chatId);
+ activeRequests.delete(chatId);
return;
}
- // Store reader globally so stop button can cancel it
+ // Store reader per-chat and globally
+ const request = activeRequests.get(chatId);
+ if (request) {
+ request.reader = reader;
+ // Ensure tracking is updated when reader is set
+ updateRequestTracking(chatId, true);
+ // Ensure interval is running (in case it wasn't started earlier)
+ startTokensPerSecondInterval();
+ }
currentReader = reader;
+ // Get target chat for this request
+ let targetChat = chatStore.getChat(chatId);
+ if (!targetChat) {
+ // Chat was deleted
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
+ return;
+ }
+
// Function to add content to the chat and handle DOM updates efficiently
const addToChat = (token) => {
- const chatStore = Alpine.store("chat");
- chatStore.add("assistant", token);
- // Count tokens for rate calculation (rough estimate: count characters/4)
- tokensReceived += Math.ceil(token.length / 4);
- updateTokensPerSecond();
- // Efficiently scroll into view without triggering multiple reflows
- // const messages = document.getElementById('messages');
- // messages.scrollTop = messages.scrollHeight;
+ const currentChat = chatStore.getChat(chatId);
+ if (!currentChat) return; // Chat was deleted
+ chatStore.add("assistant", token, null, null, chatId);
+ // Count tokens for rate calculation (per chat)
+ const request = activeRequests.get(chatId);
+ if (request) {
+ const tokenCount = Math.ceil(token.length / 4);
+ request.tokensReceived += tokenCount;
+ }
+ // Only update display if this is the active chat (interval will handle it)
+ // Don't call updateTokensPerSecond here to avoid unnecessary updates
};
let buffer = "";
@@ -1059,6 +1872,14 @@ async function promptGPT(systemPrompt, input) {
const { value, done } = await reader.read();
if (done) break;
+ // Check if chat still exists
+ targetChat = chatStore.getChat(chatId);
+ if (!targetChat) {
+ // Chat was deleted, abort
+ break;
+ }
+ const targetHistory = targetChat.history;
+
buffer += value;
let lines = buffer.split("\n");
@@ -1074,9 +1895,9 @@ async function promptGPT(systemPrompt, input) {
try {
const jsonData = JSON.parse(line.substring(6));
- // Update token usage if present
+ // Update token usage if present (for the chat that initiated this request)
if (jsonData.usage) {
- Alpine.store("chat").updateTokenUsage(jsonData.usage);
+ chatStore.updateTokenUsage(jsonData.usage, chatId);
}
const token = jsonData.choices[0].delta.content;
@@ -1094,7 +1915,7 @@ async function promptGPT(systemPrompt, input) {
if (thinkingContent.trim()) {
// Only add the final thinking message if we don't already have one
if (lastThinkingMessageIndex === -1) {
- Alpine.store("chat").add("thinking", thinkingContent);
+ chatStore.add("thinking", thinkingContent, null, null, chatId);
}
}
return;
@@ -1103,21 +1924,28 @@ async function promptGPT(systemPrompt, input) {
// Handle content based on thinking state
if (isThinking) {
thinkingContent += token;
- // Count tokens for rate calculation
- tokensReceived += Math.ceil(token.length / 4);
- updateTokensPerSecond();
+ // Count tokens for rate calculation (per chat)
+ const request = activeRequests.get(chatId);
+ if (request) {
+ request.tokensReceived += Math.ceil(token.length / 4);
+ }
+ // Only update display if this is the active chat (interval will handle it)
+ // Don't call updateTokensPerSecond here to avoid unnecessary updates
// Update the last thinking message or create a new one
if (lastThinkingMessageIndex === -1) {
// Create new thinking message
- Alpine.store("chat").add("thinking", thinkingContent);
- lastThinkingMessageIndex = Alpine.store("chat").history.length - 1;
+ chatStore.add("thinking", thinkingContent, null, null, chatId);
+ const targetChat = chatStore.getChat(chatId);
+ lastThinkingMessageIndex = targetChat ? targetChat.history.length - 1 : -1;
} else {
// Update existing thinking message
- const chatStore = Alpine.store("chat");
- const lastMessage = chatStore.history[lastThinkingMessageIndex];
- if (lastMessage && lastMessage.role === "thinking") {
- lastMessage.content = thinkingContent;
- lastMessage.html = DOMPurify.sanitize(marked.parse(thinkingContent));
+ const currentChat = chatStore.getChat(chatId);
+ if (currentChat && lastThinkingMessageIndex >= 0) {
+ const lastMessage = currentChat.history[lastThinkingMessageIndex];
+ if (lastMessage && lastMessage.role === "thinking") {
+ lastMessage.content = thinkingContent;
+ lastMessage.html = DOMPurify.sanitize(marked.parse(thinkingContent));
+ }
}
}
// Scroll when thinking is updated (throttled)
@@ -1168,8 +1996,9 @@ async function promptGPT(systemPrompt, input) {
if (contentBuffer.length > 0) {
addToChat(contentBuffer.join(""));
}
- if (thinkingContent.trim() && lastThinkingMessageIndex === -1) {
- Alpine.store("chat").add("thinking", thinkingContent);
+ const finalChat = chatStore.getChat(chatId);
+ if (finalChat && thinkingContent.trim() && lastThinkingMessageIndex === -1) {
+ chatStore.add("thinking", thinkingContent, null, null, chatId);
}
// Highlight all code blocks once at the end
@@ -1177,23 +2006,41 @@ async function promptGPT(systemPrompt, input) {
} catch (error) {
// Don't show error if request was aborted by user
if (error.name !== 'AbortError' || !currentAbortController) {
- Alpine.store("chat").add(
- "assistant",
- `Error: Failed to process stream`,
- );
+ const currentChat = chatStore.getChat(chatId);
+ if (currentChat) {
+ chatStore.add(
+ "assistant",
+ `Error: Failed to process stream`,
+ null,
+ null,
+ chatId
+ );
+ }
}
} finally {
// Perform any cleanup if necessary
if (reader) {
reader.releaseLock();
}
- currentReader = null;
- currentAbortController = null;
+ // Only clear global references if this was the active chat's request
+ const activeChat = chatStore.activeChat();
+ if (activeChat && activeChat.id === chatId) {
+ currentReader = null;
+ currentAbortController = null;
+ toggleLoader(false, chatId);
+ }
+ // Cleanup per-chat tracking
+ activeRequests.delete(chatId);
+ updateRequestTracking(chatId, false);
}
}
// Remove class "loader" from the element with "loader" id
- toggleLoader(false);
+ // Only toggle loader off if this was the active chat
+ const finalActiveChat = chatStore.activeChat();
+ if (finalActiveChat && finalActiveChat.id === chatId) {
+ toggleLoader(false, chatId);
+ }
// scroll to the bottom of the chat consistently
setTimeout(() => {
@@ -1233,205 +2080,402 @@ document.addEventListener("alpine:init", () => {
// Check if store already exists (initialized in chat.html)
if (!Alpine.store("chat")) {
// Fallback initialization (should not be needed if chat.html loads correctly)
+ // This matches the structure in chat.html
+ function generateChatId() {
+ return "chat_" + Date.now() + "_" + Math.random().toString(36).substr(2, 9);
+ }
+
+ function getCurrentModel() {
+ const modelInput = document.getElementById("chat-model");
+ return modelInput ? modelInput.value : "";
+ }
+
Alpine.store("chat", {
- history: [],
- languages: [undefined],
- systemPrompt: "",
- mcpMode: false,
- contextSize: null,
- tokenUsage: {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0,
- currentRequest: null
- },
- clear() {
- this.history.length = 0;
- this.tokenUsage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0,
- currentRequest: null
- };
- },
- updateTokenUsage(usage) {
- // Usage values in streaming responses are cumulative totals for the current request
- // We track session totals separately and only update when we see new (higher) values
- if (usage) {
- const currentRequest = this.tokenUsage.currentRequest || {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0
+ chats: [],
+ activeChatId: null,
+ chatIdCounter: 0,
+ languages: [undefined],
+ activeRequestIds: [], // Track chat IDs with active requests for UI reactivity
+
+ activeChat() {
+ if (!this.activeChatId) return null;
+ return this.chats.find(c => c.id === this.activeChatId) || null;
+ },
+
+ getChat(chatId) {
+ return this.chats.find(c => c.id === chatId) || null;
+ },
+
+ createChat(model, systemPrompt, mcpMode) {
+ const chatId = generateChatId();
+ const now = Date.now();
+ const chat = {
+ id: chatId,
+ name: "New Chat",
+ model: model || getCurrentModel() || "",
+ history: [],
+ systemPrompt: systemPrompt || "",
+ mcpMode: mcpMode || false,
+ tokenUsage: {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0,
+ currentRequest: null
+ },
+ contextSize: null,
+ createdAt: now,
+ updatedAt: now
};
+ this.chats.push(chat);
+ this.activeChatId = chatId;
+ return chat;
+ },
+
+ switchChat(chatId) {
+ if (this.chats.find(c => c.id === chatId)) {
+ this.activeChatId = chatId;
+ return true;
+ }
+ return false;
+ },
+
+ deleteChat(chatId) {
+ const index = this.chats.findIndex(c => c.id === chatId);
+ if (index === -1) return false;
- // Check if this is a new/updated usage (values increased)
- const isNewUsage =
- (usage.prompt_tokens !== undefined && usage.prompt_tokens > currentRequest.promptTokens) ||
- (usage.completion_tokens !== undefined && usage.completion_tokens > currentRequest.completionTokens) ||
- (usage.total_tokens !== undefined && usage.total_tokens > currentRequest.totalTokens);
+ this.chats.splice(index, 1);
- if (isNewUsage) {
- // Update session totals: subtract old request usage, add new
- this.tokenUsage.promptTokens = this.tokenUsage.promptTokens - currentRequest.promptTokens + (usage.prompt_tokens || 0);
- this.tokenUsage.completionTokens = this.tokenUsage.completionTokens - currentRequest.completionTokens + (usage.completion_tokens || 0);
- this.tokenUsage.totalTokens = this.tokenUsage.totalTokens - currentRequest.totalTokens + (usage.total_tokens || 0);
-
- // Store current request usage
- this.tokenUsage.currentRequest = {
- promptTokens: usage.prompt_tokens || 0,
- completionTokens: usage.completion_tokens || 0,
- totalTokens: usage.total_tokens || 0
+ if (this.activeChatId === chatId) {
+ if (this.chats.length > 0) {
+ this.activeChatId = this.chats[0].id;
+ } else {
+ this.createChat();
+ }
+ }
+ return true;
+ },
+
+ updateChatName(chatId, name) {
+ const chat = this.getChat(chatId);
+ if (chat) {
+ chat.name = name || "New Chat";
+ chat.updatedAt = Date.now();
+ return true;
+ }
+ return false;
+ },
+
+ clear() {
+ const chat = this.activeChat();
+ if (chat) {
+ chat.history.length = 0;
+ chat.tokenUsage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0,
+ currentRequest: null
};
+ chat.updatedAt = Date.now();
}
- }
- },
- getRemainingTokens() {
- if (!this.contextSize) return null;
- return Math.max(0, this.contextSize - this.tokenUsage.totalTokens);
- },
- getContextUsagePercent() {
- if (!this.contextSize) return null;
- return Math.min(100, (this.tokenUsage.totalTokens / this.contextSize) * 100);
- },
- add(role, content, image, audio) {
- const N = this.history.length - 1;
- // For thinking and reasoning messages, always create a new message
- if (role === "thinking" || role === "reasoning") {
- let c = "";
- const lines = content.split("\n");
- lines.forEach((line) => {
- c += DOMPurify.sanitize(marked.parse(line));
- });
- this.history.push({ role, content, html: c, image, audio });
- }
- // For other messages, merge if same role
- else if (this.history.length && this.history[N].role === role) {
- this.history[N].content += content;
- this.history[N].html = DOMPurify.sanitize(
- marked.parse(this.history[N].content)
- );
- // Merge new images and audio with existing ones
- if (image && image.length > 0) {
- this.history[N].image = [...(this.history[N].image || []), ...image];
+ },
+
+ updateTokenUsage(usage, targetChatId = null) {
+ // If targetChatId is provided, update that chat, otherwise use active chat
+ // This ensures token usage updates go to the chat that initiated the request
+ const chat = targetChatId ? this.getChat(targetChatId) : this.activeChat();
+ if (!chat) return;
+
+ if (usage) {
+ const currentRequest = chat.tokenUsage.currentRequest || {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0
+ };
+
+ const isNewUsage =
+ (usage.prompt_tokens !== undefined && usage.prompt_tokens > currentRequest.promptTokens) ||
+ (usage.completion_tokens !== undefined && usage.completion_tokens > currentRequest.completionTokens) ||
+ (usage.total_tokens !== undefined && usage.total_tokens > currentRequest.totalTokens);
+
+ if (isNewUsage) {
+ chat.tokenUsage.promptTokens = chat.tokenUsage.promptTokens - currentRequest.promptTokens + (usage.prompt_tokens || 0);
+ chat.tokenUsage.completionTokens = chat.tokenUsage.completionTokens - currentRequest.completionTokens + (usage.completion_tokens || 0);
+ chat.tokenUsage.totalTokens = chat.tokenUsage.totalTokens - currentRequest.totalTokens + (usage.total_tokens || 0);
+
+ chat.tokenUsage.currentRequest = {
+ promptTokens: usage.prompt_tokens || 0,
+ completionTokens: usage.completion_tokens || 0,
+ totalTokens: usage.total_tokens || 0
+ };
+ chat.updatedAt = Date.now();
+ }
}
- if (audio && audio.length > 0) {
- this.history[N].audio = [...(this.history[N].audio || []), ...audio];
+ },
+
+ getRemainingTokens() {
+ const chat = this.activeChat();
+ if (!chat || !chat.contextSize) return null;
+ return Math.max(0, chat.contextSize - chat.tokenUsage.totalTokens);
+ },
+
+ getContextUsagePercent() {
+ const chat = this.activeChat();
+ if (!chat || !chat.contextSize) return null;
+ return Math.min(100, (chat.tokenUsage.totalTokens / chat.contextSize) * 100);
+ },
+
+ // Check if a chat has an active request (for UI indicators)
+ hasActiveRequest(chatId) {
+ if (!chatId) return false;
+ // Use reactive array for Alpine.js reactivity
+ return this.activeRequestIds.includes(chatId);
+ },
+
+ // Update active request tracking (called from chat.js)
+ updateActiveRequestTracking(chatId, isActive) {
+ if (isActive) {
+ if (!this.activeRequestIds.includes(chatId)) {
+ this.activeRequestIds.push(chatId);
+ }
+ } else {
+ const index = this.activeRequestIds.indexOf(chatId);
+ if (index > -1) {
+ this.activeRequestIds.splice(index, 1);
+ }
}
- } else {
- let c = "";
- const lines = content.split("\n");
- lines.forEach((line) => {
- c += DOMPurify.sanitize(marked.parse(line));
- });
- this.history.push({
- role,
- content,
- html: c,
- image: image || [],
- audio: audio || []
- });
- }
- const chatContainer = document.getElementById('chat');
- if (chatContainer) {
- chatContainer.scrollTo({
- top: chatContainer.scrollHeight,
- behavior: 'smooth'
- });
- }
- // Also scroll thinking box if it's a thinking/reasoning message
- if (role === "thinking" || role === "reasoning") {
- setTimeout(() => {
- if (typeof window.scrollThinkingBoxToBottom === 'function') {
- window.scrollThinkingBoxToBottom();
+ },
+
+ add(role, content, image, audio, targetChatId = null) {
+ // If targetChatId is provided, add to that chat, otherwise use active chat
+ const chat = targetChatId ? this.getChat(targetChatId) : this.activeChat();
+ if (!chat) return;
+
+ const N = chat.history.length - 1;
+ if (role === "thinking" || role === "reasoning") {
+ let c = "";
+ const lines = content.split("\n");
+ lines.forEach((line) => {
+ c += DOMPurify.sanitize(marked.parse(line));
+ });
+ chat.history.push({ role, content, html: c, image, audio });
+ }
+ else if (chat.history.length && chat.history[N].role === role) {
+ chat.history[N].content += content;
+ chat.history[N].html = DOMPurify.sanitize(
+ marked.parse(chat.history[N].content)
+ );
+ if (image && image.length > 0) {
+ chat.history[N].image = [...(chat.history[N].image || []), ...image];
}
- }, 100);
- }
- const parser = new DOMParser();
- const html = parser.parseFromString(
- this.history[this.history.length - 1].html,
- "text/html"
- );
- const code = html.querySelectorAll("pre code");
- if (!code.length) return;
- code.forEach((el) => {
- const language = el.className.split("language-")[1];
- if (this.languages.includes(language)) return;
- const script = document.createElement("script");
- script.src = `https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/languages/${language}.min.js`;
- document.head.appendChild(script);
- this.languages.push(language);
- });
- },
- messages() {
- return this.history.map((message) => ({
- role: message.role,
- content: message.content,
- image: message.image,
- audio: message.audio,
- }));
- },
+ if (audio && audio.length > 0) {
+ chat.history[N].audio = [...(chat.history[N].audio || []), ...audio];
+ }
+ } else {
+ let c = "";
+ const lines = content.split("\n");
+ lines.forEach((line) => {
+ c += DOMPurify.sanitize(marked.parse(line));
+ });
+ chat.history.push({
+ role,
+ content,
+ html: c,
+ image: image || [],
+ audio: audio || []
+ });
+
+ if (role === "user" && chat.name === "New Chat" && content.trim()) {
+ const name = content.trim().substring(0, 50);
+ chat.name = name.length < content.trim().length ? name + "..." : name;
+ }
+ }
+
+ chat.updatedAt = Date.now();
+
+ const chatContainer = document.getElementById('chat');
+ if (chatContainer) {
+ chatContainer.scrollTo({
+ top: chatContainer.scrollHeight,
+ behavior: 'smooth'
+ });
+ }
+ if (role === "thinking" || role === "reasoning") {
+ setTimeout(() => {
+ if (typeof window.scrollThinkingBoxToBottom === 'function') {
+ window.scrollThinkingBoxToBottom();
+ }
+ }, 100);
+ }
+ const parser = new DOMParser();
+ const html = parser.parseFromString(
+ chat.history[chat.history.length - 1].html,
+ "text/html"
+ );
+ const code = html.querySelectorAll("pre code");
+ if (!code.length) return;
+ code.forEach((el) => {
+ const language = el.className.split("language-")[1];
+ if (this.languages.includes(language)) return;
+ const script = document.createElement("script");
+ script.src = `https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/languages/${language}.min.js`;
+ document.head.appendChild(script);
+ this.languages.push(language);
+ });
+ },
+
+ messages() {
+ const chat = this.activeChat();
+ if (!chat) return [];
+ return chat.history.map((message) => ({
+ role: message.role,
+ content: message.content,
+ image: message.image,
+ audio: message.audio,
+ }));
+ },
+
+ // Getter for active chat history to ensure reactivity
+ get activeHistory() {
+ const chat = this.activeChat();
+ return chat ? chat.history : [];
+ },
});
}
});
-// Check for message from index page on load
// Check for message from index page on load and initialize chats.
// Runs after a short delay so the Alpine store (registered in chat.html)
// is guaranteed to exist before we touch it.
document.addEventListener('DOMContentLoaded', function() {
  // Wait for Alpine to be ready
  setTimeout(() => {
    if (!window.Alpine || !Alpine.store("chat")) {
      console.error('Alpine store not initialized');
      return;
    }

    const chatStore = Alpine.store("chat");

    // Check for message from index page FIRST - if present, create new chat
    const chatData = localStorage.getItem('localai_index_chat_data');
    let shouldCreateNewChat = false;
    let indexChatData = null;

    if (chatData) {
      try {
        indexChatData = JSON.parse(chatData);
        shouldCreateNewChat = true; // We have data from index, create new chat
      } catch (error) {
        console.error('Error parsing chat data from index:', error);
        localStorage.removeItem('localai_index_chat_data');
      }
    }

    // Load chats from storage FIRST (but don't set active yet if we're creating new from index)
    const storedData = loadChatsFromStorage();

    if (storedData && storedData.chats && storedData.chats.length > 0) {
      // Restore chats from storage - clear existing and push new ones to maintain reactivity
      chatStore.chats.length = 0;
      storedData.chats.forEach(chat => {
        chatStore.chats.push(chat);
      });
      // Don't set activeChatId yet if we're creating a new chat from index
      if (!shouldCreateNewChat) {
        chatStore.activeChatId = storedData.activeChatId || storedData.chats[0].id;
        // Ensure active chat exists (stored id may reference a deleted chat)
        if (!chatStore.activeChat()) {
          chatStore.activeChatId = storedData.chats[0].id;
        }
      }
    }

    if (shouldCreateNewChat) {
      // Create a new chat with the model from URL (which matches the selected model from index)
      const currentModel = document.getElementById("chat-model")?.value || "";
      const newChat = chatStore.createChat(currentModel, "", indexChatData.mcpMode || false);

      // Update context size from template if available.
      // parseInt with explicit radix and a NaN guard, matching the validation
      // done in updateModelAndContextSize (chat.html).
      const contextSizeInput = document.getElementById("chat-model");
      if (contextSizeInput && contextSizeInput.dataset.contextSize) {
        const contextSize = parseInt(contextSizeInput.dataset.contextSize, 10);
        if (!isNaN(contextSize)) {
          newChat.contextSize = contextSize;
        }
      }

      // Set the message and files
      const input = document.getElementById('input');
      if (input && indexChatData.message) {
        input.value = indexChatData.message;
        // Process files if any
        if (indexChatData.imageFiles && indexChatData.imageFiles.length > 0) {
          indexChatData.imageFiles.forEach(file => {
            images.push(file.data);
          });
        }

        if (indexChatData.audioFiles && indexChatData.audioFiles.length > 0) {
          indexChatData.audioFiles.forEach(file => {
            audios.push(file.data);
          });
        }

        if (indexChatData.textFiles && indexChatData.textFiles.length > 0) {
          indexChatData.textFiles.forEach(file => {
            fileContents.push({ name: file.name, content: file.data });
            currentFileNames.push(file.name);
          });
        }

        // Clear localStorage so a reload does not resubmit the same message
        localStorage.removeItem('localai_index_chat_data');

        // Save the new chat
        saveChatsToStorage();

        // Update UI to reflect new active chat
        updateUIForActiveChat();

        // Auto-submit after a short delay to ensure everything is ready
        setTimeout(() => {
          if (input.value.trim()) {
            processAndSendMessage(input.value);
          }
        }, 500);
      } else {
        // No message, but might have mcpMode - clear localStorage
        localStorage.removeItem('localai_index_chat_data');
        saveChatsToStorage();
        updateUIForActiveChat();
      }
    } else {
      // Normal flow: create default chat if none exist
      if (!storedData || !storedData.chats || storedData.chats.length === 0) {
        const currentModel = document.getElementById("chat-model")?.value || "";
        const oldSystemPrompt = localStorage.getItem(SYSTEM_PROMPT_STORAGE_KEY);
        chatStore.createChat(currentModel, oldSystemPrompt || "", false);

        // Remove old system prompt key after migration
        if (oldSystemPrompt) {
          localStorage.removeItem(SYSTEM_PROMPT_STORAGE_KEY);
        }
      }

      // Update context size from template if available (radix + NaN guard as above)
      const contextSizeInput = document.getElementById("chat-model");
      if (contextSizeInput && contextSizeInput.dataset.contextSize) {
        const contextSize = parseInt(contextSizeInput.dataset.contextSize, 10);
        const activeChat = chatStore.activeChat();
        if (activeChat && !isNaN(contextSize)) {
          activeChat.contextSize = contextSize;
        }
      }

      // Update UI to reflect active chat
      updateUIForActiveChat();
    }

    // Save initial state
    saveChatsToStorage();
  }, 300);
});
diff --git a/core/http/views/chat.html b/core/http/views/chat.html
index fadf57d85950..a479504b90e7 100644
--- a/core/http/views/chat.html
+++ b/core/http/views/chat.html
@@ -61,37 +61,140 @@
}
if (Alpine.store("chat")) {
- Alpine.store("chat").contextSize = __chatContextSize;
- Alpine.store("chat").mcpMode = initialMcpMode;
+ // Store already initialized, just update context size if needed
+ const activeChat = Alpine.store("chat").activeChat();
+ if (activeChat && __chatContextSize !== null) {
+ activeChat.contextSize = __chatContextSize;
+ }
return;
}
+ // Generate unique chat ID
+ function generateChatId() {
+ return "chat_" + Date.now() + "_" + Math.random().toString(36).substr(2, 9);
+ }
+
+ // Get current model from URL or input
+ function getCurrentModel() {
+ const modelInput = document.getElementById("chat-model");
+ return modelInput ? modelInput.value : "";
+ }
+
Alpine.store("chat", {
- history: [],
+ chats: [],
+ activeChatId: null,
+ chatIdCounter: 0,
languages: [undefined],
- systemPrompt: "",
- mcpMode: initialMcpMode,
- contextSize: __chatContextSize,
- tokenUsage: {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0,
- currentRequest: null
+ activeRequestIds: [], // Track chat IDs with active requests for UI reactivity
+
    // Helper to get the active chat object, or null when no chat is selected
    // or activeChatId no longer exists in `chats` (e.g. after deletion).
    activeChat() {
      if (!this.activeChatId) return null;
      return this.chats.find(c => c.id === this.activeChatId) || null;
    },
- clear() {
- this.history.length = 0;
- this.tokenUsage = {
- promptTokens: 0,
- completionTokens: 0,
- totalTokens: 0,
- currentRequest: null
+
+ // Helper to get chat by ID
+ getChat(chatId) {
+ return this.chats.find(c => c.id === chatId) || null;
+ },
+
    // Create a new chat, make it the active chat, and return it.
    // `model` falls back to the model selector's current value, then "";
    // `systemPrompt` defaults to "" and `mcpMode` to false when omitted.
    createChat(model, systemPrompt, mcpMode) {
      const chatId = generateChatId();
      const now = Date.now();
      const chat = {
        id: chatId,
        name: "New Chat", // renamed from the first user message elsewhere
        model: model || getCurrentModel() || "",
        history: [],
        systemPrompt: systemPrompt || "",
        mcpMode: mcpMode || false,
        temperature: null, // null means use default
        topP: null, // null means use default
        topK: null, // null means use default
        tokenUsage: {
          promptTokens: 0,
          completionTokens: 0,
          totalTokens: 0,
          currentRequest: null
        },
        contextSize: __chatContextSize,
        createdAt: now,
        updatedAt: now
      };
      this.chats.push(chat);
      this.activeChatId = chatId;
      return chat;
    },
- updateTokenUsage(usage) {
+
+ // Switch to a different chat
+ switchChat(chatId) {
+ if (this.chats.find(c => c.id === chatId)) {
+ this.activeChatId = chatId;
+ // Update context size if needed
+ const chat = this.activeChat();
+ if (chat && __chatContextSize !== null) {
+ chat.contextSize = __chatContextSize;
+ }
+ return true;
+ }
+ return false;
+ },
+
+ // Delete a chat
+ deleteChat(chatId) {
+ const index = this.chats.findIndex(c => c.id === chatId);
+ if (index === -1) return false;
+
+ this.chats.splice(index, 1);
+
+ // If deleted chat was active, switch to another or create new
+ if (this.activeChatId === chatId) {
+ if (this.chats.length > 0) {
+ this.activeChatId = this.chats[0].id;
+ } else {
+ // Create a new default chat
+ this.createChat();
+ }
+ }
+ return true;
+ },
+
+ // Update chat name
+ updateChatName(chatId, name) {
+ const chat = this.getChat(chatId);
+ if (chat) {
+ chat.name = name || "New Chat";
+ chat.updatedAt = Date.now();
+ return true;
+ }
+ return false;
+ },
+
+ clear() {
+ const chat = this.activeChat();
+ if (chat) {
+ chat.history.length = 0;
+ chat.tokenUsage = {
+ promptTokens: 0,
+ completionTokens: 0,
+ totalTokens: 0,
+ currentRequest: null
+ };
+ chat.updatedAt = Date.now();
+ }
+ },
+
+ updateTokenUsage(usage, targetChatId = null) {
+ // If targetChatId is provided, update that chat, otherwise use active chat
+ // This ensures token usage updates go to the chat that initiated the request
+ const chat = targetChatId ? this.getChat(targetChatId) : this.activeChat();
+ if (!chat) return;
+
// Usage values in streaming responses are cumulative totals for the current request
// We track session totals separately and only update when we see new (higher) values
if (usage) {
- const currentRequest = this.tokenUsage.currentRequest || {
+ const currentRequest = chat.tokenUsage.currentRequest || {
promptTokens: 0,
completionTokens: 0,
totalTokens: 0
@@ -105,29 +208,61 @@
if (isNewUsage) {
// Update session totals: subtract old request usage, add new
- this.tokenUsage.promptTokens = this.tokenUsage.promptTokens - currentRequest.promptTokens + (usage.prompt_tokens || 0);
- this.tokenUsage.completionTokens = this.tokenUsage.completionTokens - currentRequest.completionTokens + (usage.completion_tokens || 0);
- this.tokenUsage.totalTokens = this.tokenUsage.totalTokens - currentRequest.totalTokens + (usage.total_tokens || 0);
+ chat.tokenUsage.promptTokens = chat.tokenUsage.promptTokens - currentRequest.promptTokens + (usage.prompt_tokens || 0);
+ chat.tokenUsage.completionTokens = chat.tokenUsage.completionTokens - currentRequest.completionTokens + (usage.completion_tokens || 0);
+ chat.tokenUsage.totalTokens = chat.tokenUsage.totalTokens - currentRequest.totalTokens + (usage.total_tokens || 0);
// Store current request usage
- this.tokenUsage.currentRequest = {
+ chat.tokenUsage.currentRequest = {
promptTokens: usage.prompt_tokens || 0,
completionTokens: usage.completion_tokens || 0,
totalTokens: usage.total_tokens || 0
};
+ chat.updatedAt = Date.now();
}
}
},
+
    // Remaining context-window tokens for the active chat, or null when there
    // is no active chat or its context size is unknown. Clamped at 0 so the
    // UI never shows a negative remainder.
    getRemainingTokens() {
      const chat = this.activeChat();
      if (!chat || !chat.contextSize) return null;
      return Math.max(0, chat.contextSize - chat.tokenUsage.totalTokens);
    },
+
    // Percentage of the active chat's context window already consumed, capped
    // at 100; null when there is no active chat or no known context size.
    getContextUsagePercent() {
      const chat = this.activeChat();
      if (!chat || !chat.contextSize) return null;
      return Math.min(100, (chat.tokenUsage.totalTokens / chat.contextSize) * 100);
    },
- add(role, content, image, audio) {
- const N = this.history.length - 1;
+
+ // Check if a chat has an active request (for UI indicators)
+ hasActiveRequest(chatId) {
+ if (!chatId) return false;
+ // Use reactive array for Alpine.js reactivity
+ return this.activeRequestIds.includes(chatId);
+ },
+
+ // Update active request tracking (called from chat.js)
+ updateActiveRequestTracking(chatId, isActive) {
+ if (isActive) {
+ if (!this.activeRequestIds.includes(chatId)) {
+ this.activeRequestIds.push(chatId);
+ }
+ } else {
+ const index = this.activeRequestIds.indexOf(chatId);
+ if (index > -1) {
+ this.activeRequestIds.splice(index, 1);
+ }
+ }
+ },
+
+ add(role, content, image, audio, targetChatId = null) {
+ // If targetChatId is provided, add to that chat, otherwise use active chat
+ // This allows streaming to continue to the correct chat even if user switches
+ const chat = targetChatId ? this.getChat(targetChatId) : this.activeChat();
+ if (!chat) return;
+
+ const N = chat.history.length - 1;
// For thinking, reasoning, tool_call, and tool_result messages, always create a new message
if (role === "thinking" || role === "reasoning" || role === "tool_call" || role === "tool_result") {
let c = "";
@@ -154,23 +289,28 @@
}
// Set expanded state: thinking is expanded by default in non-MCP mode, collapsed in MCP mode
// Reasoning, tool_call, and tool_result are always collapsed by default
- const isMCPMode = this.mcpMode || false;
+ const isMCPMode = chat.mcpMode || false;
const shouldExpand = (role === "thinking" && !isMCPMode) || false;
- this.history.push({ role, content, html: c, image, audio, expanded: shouldExpand });
+ chat.history.push({ role, content, html: c, image, audio, expanded: shouldExpand });
+ // Auto-name chat from first user message
+ if (role === "user" && chat.name === "New Chat" && content.trim()) {
+ const name = content.trim().substring(0, 50);
+ chat.name = name.length < content.trim().length ? name + "..." : name;
+ }
}
// For other messages, merge if same role
- else if (this.history.length && this.history[N].role === role) {
- this.history[N].content += content;
- this.history[N].html = DOMPurify.sanitize(
- marked.parse(this.history[N].content)
+ else if (chat.history.length && chat.history[N].role === role) {
+ chat.history[N].content += content;
+ chat.history[N].html = DOMPurify.sanitize(
+ marked.parse(chat.history[N].content)
);
// Merge new images and audio with existing ones
if (image && image.length > 0) {
- this.history[N].image = [...(this.history[N].image || []), ...image];
+ chat.history[N].image = [...(chat.history[N].image || []), ...image];
}
if (audio && audio.length > 0) {
- this.history[N].audio = [...(this.history[N].audio || []), ...audio];
+ chat.history[N].audio = [...(chat.history[N].audio || []), ...audio];
}
} else {
let c = "";
@@ -178,14 +318,28 @@
lines.forEach((line) => {
c += DOMPurify.sanitize(marked.parse(line));
});
- this.history.push({
+ chat.history.push({
role,
content,
html: c,
image: image || [],
audio: audio || []
});
+
+ // Auto-name chat from first user message
+ if (role === "user" && chat.name === "New Chat" && content.trim()) {
+ const name = content.trim().substring(0, 50);
+ chat.name = name.length < content.trim().length ? name + "..." : name;
+ }
+ }
+
+ chat.updatedAt = Date.now();
+
+ // Auto-save after adding message
+ if (typeof autoSaveChats === 'function') {
+ autoSaveChats();
}
+
// Scroll to bottom consistently for all messages (use #chat as it's the scrollable container)
setTimeout(() => {
const chatContainer = document.getElementById('chat');
@@ -204,7 +358,7 @@
}, 100);
const parser = new DOMParser();
const html = parser.parseFromString(
- this.history[this.history.length - 1].html,
+ chat.history[chat.history.length - 1].html,
"text/html"
);
const code = html.querySelectorAll("pre code");
@@ -242,14 +396,23 @@
}, 100);
}
},
+
    // Snapshot of the active chat's history in the wire format expected by
    // the completions API (role/content plus attached image/audio data).
    // Returns [] when no chat is active.
    messages() {
      const chat = this.activeChat();
      if (!chat) return [];
      return chat.history.map((message) => ({
        role: message.role,
        content: message.content,
        image: message.image,
        audio: message.audio,
      }));
    },
+
    // Getter for the active chat's history so Alpine templates stay reactive
    // across chat switches; yields [] when no chat is active.
    get activeHistory() {
      const chat = this.activeChat();
      return chat ? chat.history : [];
    },
});
}
@@ -265,57 +428,161 @@
// DOM already loaded, try immediately
if (window.Alpine) __initChatStore();
}
+
+ // Function to update model and context size when model selector changes
+ window.updateModelAndContextSize = function(selectElement) {
+ if (!window.Alpine || !Alpine.store("chat")) {
+ // Fallback: navigate to new model URL
+ window.location = selectElement.value;
+ return;
+ }
+
+ const chatStore = Alpine.store("chat");
+ const activeChat = chatStore.activeChat();
+
+ if (!activeChat) {
+ window.location = selectElement.value;
+ return;
+ }
+
+ // Get the selected option
+ const selectedOption = selectElement.options[selectElement.selectedIndex];
+ const modelName = selectElement.value.replace('chat/', '');
+
+ // Update model name
+ activeChat.model = modelName;
+ activeChat.updatedAt = Date.now();
+
+ // Get context size from data attribute
+ let contextSize = null;
+ if (selectedOption.dataset.contextSize) {
+ contextSize = parseInt(selectedOption.dataset.contextSize);
+ if (!isNaN(contextSize)) {
+ activeChat.contextSize = contextSize;
+ } else {
+ activeChat.contextSize = null;
+ }
+ } else {
+ // No context size available, set to null
+ activeChat.contextSize = null;
+ }
+
+ // Check MCP availability from data attribute
+ const hasMCP = selectedOption.getAttribute('data-has-mcp') === 'true';
+ if (!hasMCP) {
+ // If model doesn't support MCP, disable MCP mode
+ activeChat.mcpMode = false;
+ }
+ // Note: We don't enable MCP mode automatically, user must toggle it
+
+ // Update the hidden input for consistency
+ const contextSizeInput = document.getElementById("chat-model");
+ if (contextSizeInput) {
+ contextSizeInput.value = modelName;
+ if (contextSize) {
+ contextSizeInput.setAttribute('data-context-size', contextSize);
+ } else {
+ contextSizeInput.removeAttribute('data-context-size');
+ }
+ if (hasMCP) {
+ contextSizeInput.setAttribute('data-has-mcp', 'true');
+ } else {
+ contextSizeInput.setAttribute('data-has-mcp', 'false');
+ }
+ }
+
+ // Trigger MCP availability check in Alpine component
+ // The MCP toggle component will reactively check the data-has-mcp attribute
+
+ // Save to storage
+ if (typeof autoSaveChats === 'function') {
+ autoSaveChats();
+ }
+
+ // Update UI - this will refresh the statistics display
+ if (typeof updateUIForActiveChat === 'function') {
+ updateUIForActiveChat();
+ }
+
+ // Trigger MCP availability check in Alpine component
+ // Dispatch a custom event that the MCP toggle component can listen to
+ const modelSelector = document.getElementById('modelSelector');
+ if (modelSelector) {
+ // Trigger Alpine reactivity by dispatching change event
+ modelSelector.dispatchEvent(new Event('change', { bubbles: true }));
+ }
+ }
{{ $allGalleryConfigs:=.GalleryConfig }}
{{ $model:=.Model}}
-
+
{{template "views/partials/navbar" .}}