From bf23a7f84fe376de21026f9c7b4a965c625821d6 Mon Sep 17 00:00:00 2001
From: Kang Jiazheng <108711748+kkkjz@users.noreply.github.com>
Date: Sun, 13 Jul 2025 11:34:53 +0800
Subject: [PATCH 1/4] Delete memoryos-mcp directory

---
 memoryos-mcp/config.json            |  13 -
 memoryos-mcp/mcp.json               |  40 ---
 memoryos-mcp/memoryos/__init__.py   |   4 -
 memoryos-mcp/memoryos/long_term.py  | 156 ------------
 memoryos-mcp/memoryos/memoryos.py   | 295 ---------------------
 memoryos-mcp/memoryos/mid_term.py   | 324 -----------------------
 memoryos-mcp/memoryos/prompts.py    | 238 -----------------
 memoryos-mcp/memoryos/retriever.py  | 101 --------
 memoryos-mcp/memoryos/short_term.py |  61 -----
 memoryos-mcp/memoryos/updater.py    | 199 ---------------
 memoryos-mcp/memoryos/utils.py      | 227 -----------------
 memoryos-mcp/requirements.txt       |  19 --
 memoryos-mcp/server_new.py          | 292 ---------------------
 memoryos-mcp/test_comprehensive.py  | 381 ----------------------------
 14 files changed, 2350 deletions(-)
 delete mode 100644 memoryos-mcp/config.json
 delete mode 100644 memoryos-mcp/mcp.json
 delete mode 100644 memoryos-mcp/memoryos/__init__.py
 delete mode 100644 memoryos-mcp/memoryos/long_term.py
 delete mode 100644 memoryos-mcp/memoryos/memoryos.py
 delete mode 100644 memoryos-mcp/memoryos/mid_term.py
 delete mode 100644 memoryos-mcp/memoryos/prompts.py
 delete mode 100644 memoryos-mcp/memoryos/retriever.py
 delete mode 100644 memoryos-mcp/memoryos/short_term.py
 delete mode 100644 memoryos-mcp/memoryos/updater.py
 delete mode 100644 memoryos-mcp/memoryos/utils.py
 delete mode 100644 memoryos-mcp/requirements.txt
 delete mode 100644 memoryos-mcp/server_new.py
 delete mode 100644 memoryos-mcp/test_comprehensive.py

diff --git a/memoryos-mcp/config.json b/memoryos-mcp/config.json
deleted file mode 100644
index 16ccf38..0000000
--- a/memoryos-mcp/config.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-    "user_id": "test_user_001",
-    "openai_api_key": "",
-    "openai_base_url": "",
-    "data_storage_path": "./memoryos_data",
-    "assistant_id": "memoryos_assistant",
-    "short_term_capacity": 10,
-    "mid_term_capacity": 2000,
-    "long_term_knowledge_capacity": 100,
-    "retrieval_queue_capacity": 7,
-    "mid_term_heat_threshold": 5.0,
-    "llm_model": "gpt-4o-mini"
-}
\ No newline at end of file
diff --git a/memoryos-mcp/mcp.json b/memoryos-mcp/mcp.json
deleted file mode 100644
index 8677e30..0000000
--- a/memoryos-mcp/mcp.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "mcpServers": {
-    "memoryos": {
-      "command": "/root/miniconda3/envs/memos/bin/python",
-      "args": [
-        "/root/autodl-tmp/memoryos-mcp/server_new.py",
-        "--config",
-        "/root/autodl-tmp/memoryos-mcp/config.json"
-      ],
-      "env": {},
-      "description": "MemoryOS MCP Server - intelligent memory system providing memory addition, retrieval, and user profile features",
-      "capabilities": {
-        "tools": [
-          {
-            "name": "add_memory",
-            "description": "Add new memory to the MemoryOS system. 
(user_input and assistant_response pair)" - }, - { - "name": "retrieve_memory", - "description": "Retrieve related memories and context information from MemoryOS based on the query" - }, - { - "name": "get_user_profile", - "description": "Get user profile information, including personality traits, preferences, and related knowledge" - } - ], - "resources": [ - { - "uri": "memoryos://status", - "name": "MemoryOS系统状态" - }, - { - "uri": "memoryos://config", - "name": "MemoryOS配置信息" - } - ] - } - } - } -} \ No newline at end of file diff --git a/memoryos-mcp/memoryos/__init__.py b/memoryos-mcp/memoryos/__init__.py deleted file mode 100644 index a29eca9..0000000 --- a/memoryos-mcp/memoryos/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Import the main class for easy access -from .memoryos import Memoryos - -__all__ = ['Memoryos'] \ No newline at end of file diff --git a/memoryos-mcp/memoryos/long_term.py b/memoryos-mcp/memoryos/long_term.py deleted file mode 100644 index f1ebe69..0000000 --- a/memoryos-mcp/memoryos/long_term.py +++ /dev/null @@ -1,156 +0,0 @@ -import json -import numpy as np -import faiss -from collections import deque -from utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists - -class LongTermMemory: - def __init__(self, file_path, knowledge_capacity=100): - self.file_path = file_path - ensure_directory_exists(self.file_path) - self.knowledge_capacity = knowledge_capacity - self.user_profiles = {} # {user_id: {data: "profile_string", "last_updated": "timestamp"}} - # Use deques for knowledge bases to easily manage capacity - self.knowledge_base = deque(maxlen=self.knowledge_capacity) # For general/user private knowledge - self.assistant_knowledge = deque(maxlen=self.knowledge_capacity) # For assistant specific knowledge - self.load() - - def update_user_profile(self, user_id, new_data, merge=True): - if merge and user_id in self.user_profiles and self.user_profiles[user_id].get("data"): # Check if data exists - current_data = self.user_profiles[user_id]["data"] - if isinstance(current_data, str) and isinstance(new_data, str): - updated_data = f"{current_data}\n\n--- Updated on {get_timestamp()} ---\n{new_data}" - else: # Fallback to overwrite if types are not strings or for more complex merge - updated_data = new_data - else: - # If merge=False or no existing data, replace with new data - updated_data = new_data - - self.user_profiles[user_id] = { - "data": updated_data, - "last_updated": get_timestamp() - } - print(f"LongTermMemory: Updated user profile for {user_id} (merge={merge}).") - self.save() - - def get_raw_user_profile(self, user_id): - return self.user_profiles.get(user_id, {}).get("data", "None") # Return "None" string if not found - - def get_user_profile_data(self, user_id): - return self.user_profiles.get(user_id, {}) - - def add_knowledge_entry(self, knowledge_text, knowledge_deque: deque, type_name="knowledge"): - if not knowledge_text or knowledge_text.strip().lower() in ["", "none", "- none", "- none."]: - print(f"LongTermMemory: Empty {type_name} received, not saving.") - return - - # If deque is full, the oldest item is automatically removed when appending. - vec = get_embedding(knowledge_text) - vec = normalize_vector(vec).tolist() - entry = { - "knowledge": knowledge_text, - "timestamp": get_timestamp(), - "knowledge_embedding": vec - } - knowledge_deque.append(entry) - print(f"LongTermMemory: Added {type_name}. 
Current count: {len(knowledge_deque)}.") - self.save() - - def add_user_knowledge(self, knowledge_text): - self.add_knowledge_entry(knowledge_text, self.knowledge_base, "user knowledge") - - def add_assistant_knowledge(self, knowledge_text): - self.add_knowledge_entry(knowledge_text, self.assistant_knowledge, "assistant knowledge") - - def get_user_knowledge(self): - return list(self.knowledge_base) - - def get_assistant_knowledge(self): - return list(self.assistant_knowledge) - - def _search_knowledge_deque(self, query, knowledge_deque: deque, threshold=0.1, top_k=5): - if not knowledge_deque: - return [] - - query_vec = get_embedding(query) - query_vec = normalize_vector(query_vec) - - embeddings = [] - valid_entries = [] - for entry in knowledge_deque: - if "knowledge_embedding" in entry and entry["knowledge_embedding"]: - embeddings.append(np.array(entry["knowledge_embedding"], dtype=np.float32)) - valid_entries.append(entry) - else: - print(f"Warning: Entry without embedding found in knowledge_deque: {entry.get('knowledge','N/A')[:50]}") - - if not embeddings: - return [] - - embeddings_np = np.array(embeddings, dtype=np.float32) - if embeddings_np.ndim == 1: # Single item case - if embeddings_np.shape[0] == 0: return [] # Empty embeddings - embeddings_np = embeddings_np.reshape(1, -1) - - if embeddings_np.shape[0] == 0: # No valid embeddings - return [] - - dim = embeddings_np.shape[1] - index = faiss.IndexFlatIP(dim) # Using Inner Product for similarity - index.add(embeddings_np) - - query_arr = np.array([query_vec], dtype=np.float32) - distances, indices = index.search(query_arr, min(top_k, len(valid_entries))) # Search at most k or length of valid_entries - - results = [] - for i, idx in enumerate(indices[0]): - if idx != -1: # faiss returns -1 for no valid index - similarity_score = float(distances[0][i]) # For IndexFlatIP, distance is the dot product (similarity) - if similarity_score >= threshold: - results.append(valid_entries[idx]) # Add the original entry dict - - # Sort by similarity score descending before returning, as faiss might not guarantee order for IP - results.sort(key=lambda x: float(np.dot(np.array(x["knowledge_embedding"], dtype=np.float32), query_vec)), reverse=True) - return results - - def search_user_knowledge(self, query, threshold=0.1, top_k=5): - results = self._search_knowledge_deque(query, self.knowledge_base, threshold, top_k) - print(f"LongTermMemory: Searched user knowledge for '{query[:30]}...'. Found {len(results)} matches.") - return results - - def search_assistant_knowledge(self, query, threshold=0.1, top_k=5): - results = self._search_knowledge_deque(query, self.assistant_knowledge, threshold, top_k) - print(f"LongTermMemory: Searched assistant knowledge for '{query[:30]}...'. 
Found {len(results)} matches.") - return results - - def save(self): - data = { - "user_profiles": self.user_profiles, - "knowledge_base": list(self.knowledge_base), # Convert deques to lists for JSON serialization - "assistant_knowledge": list(self.assistant_knowledge) - } - try: - with open(self.file_path, "w", encoding="utf-8") as f: - json.dump(data, f, ensure_ascii=False, indent=2) - except IOError as e: - print(f"Error saving LongTermMemory to {self.file_path}: {e}") - - def load(self): - try: - with open(self.file_path, "r", encoding="utf-8") as f: - data = json.load(f) - self.user_profiles = data.get("user_profiles", {}) - # Load into deques, respecting maxlen - kb_data = data.get("knowledge_base", []) - self.knowledge_base = deque(kb_data, maxlen=self.knowledge_capacity) - - ak_data = data.get("assistant_knowledge", []) - self.assistant_knowledge = deque(ak_data, maxlen=self.knowledge_capacity) - - print(f"LongTermMemory: Loaded from {self.file_path}.") - except FileNotFoundError: - print(f"LongTermMemory: No history file found at {self.file_path}. Initializing new memory.") - except json.JSONDecodeError: - print(f"LongTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") - except Exception as e: - print(f"LongTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-mcp/memoryos/memoryos.py b/memoryos-mcp/memoryos/memoryos.py deleted file mode 100644 index 00e813c..0000000 --- a/memoryos-mcp/memoryos/memoryos.py +++ /dev/null @@ -1,295 +0,0 @@ -import os -import json -from utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, gpt_update_profile, ensure_directory_exists - -import prompts -from short_term import ShortTermMemory -from mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic -from long_term import LongTermMemory -from updater import Updater -from retriever import Retriever - -# Heat threshold for triggering profile/knowledge update from mid-term memory -H_PROFILE_UPDATE_THRESHOLD = 5.0 -DEFAULT_ASSISTANT_ID = "default_assistant_profile" - -class Memoryos: - def __init__(self, user_id: str, - openai_api_key: str, - data_storage_path: str, - openai_base_url: str = None, - assistant_id: str = DEFAULT_ASSISTANT_ID, - short_term_capacity=10, - mid_term_capacity=2000, - long_term_knowledge_capacity=100, - retrieval_queue_capacity=7, - mid_term_heat_threshold=H_PROFILE_UPDATE_THRESHOLD, - llm_model="gpt-4o-mini" # Unified model for all LLM operations - ): - self.user_id = user_id - self.assistant_id = assistant_id - self.data_storage_path = os.path.abspath(data_storage_path) - self.llm_model = llm_model - - print(f"Initializing Memoryos for user '{self.user_id}' and assistant '{self.assistant_id}'. 
Data path: {self.data_storage_path}") - print(f"Using unified LLM model: {self.llm_model}") - - # Initialize OpenAI Client - self.client = OpenAIClient(api_key=openai_api_key, base_url=openai_base_url) - - # Define file paths for user-specific data - self.user_data_dir = os.path.join(self.data_storage_path, "users", self.user_id) - user_short_term_path = os.path.join(self.user_data_dir, "short_term.json") - user_mid_term_path = os.path.join(self.user_data_dir, "mid_term.json") - user_long_term_path = os.path.join(self.user_data_dir, "long_term_user.json") # User profile and their knowledge - - # Define file paths for assistant-specific data (knowledge) - self.assistant_data_dir = os.path.join(self.data_storage_path, "assistants", self.assistant_id) - assistant_long_term_path = os.path.join(self.assistant_data_dir, "long_term_assistant.json") - - # Ensure directories exist - ensure_directory_exists(user_short_term_path) # ensure_directory_exists operates on the file path, creating parent dirs - ensure_directory_exists(user_mid_term_path) - ensure_directory_exists(user_long_term_path) - ensure_directory_exists(assistant_long_term_path) - - # Initialize Memory Modules for User - self.short_term_memory = ShortTermMemory(file_path=user_short_term_path, max_capacity=short_term_capacity) - self.mid_term_memory = MidTermMemory(file_path=user_mid_term_path, client=self.client, max_capacity=mid_term_capacity) - self.user_long_term_memory = LongTermMemory(file_path=user_long_term_path, knowledge_capacity=long_term_knowledge_capacity) - - # Initialize Memory Module for Assistant Knowledge - self.assistant_long_term_memory = LongTermMemory(file_path=assistant_long_term_path, knowledge_capacity=long_term_knowledge_capacity) - - # Initialize Orchestration Modules - self.updater = Updater(short_term_memory=self.short_term_memory, - mid_term_memory=self.mid_term_memory, - long_term_memory=self.user_long_term_memory, # Updater primarily updates user's LTM profile/knowledge - client=self.client, - llm_model=self.llm_model) - self.retriever = Retriever( - mid_term_memory=self.mid_term_memory, - long_term_memory=self.user_long_term_memory, - assistant_long_term_memory=self.assistant_long_term_memory, # Pass assistant LTM - queue_capacity=retrieval_queue_capacity - ) - - self.mid_term_heat_threshold = mid_term_heat_threshold - - def _trigger_profile_and_knowledge_update_if_needed(self): - """ - Checks mid-term memory for hot segments and triggers profile/knowledge update if threshold is met. - Adapted from main_memoybank.py's update_user_profile_from_top_segment. - """ - if not self.mid_term_memory.heap: - return - - # Peek at the top of the heap (hottest segment) - # MidTermMemory heap stores (-H_segment, sid) - neg_heat, sid = self.mid_term_memory.heap[0] - current_heat = -neg_heat - - if current_heat >= self.mid_term_heat_threshold: - session = self.mid_term_memory.sessions.get(sid) - if not session: - self.mid_term_memory.rebuild_heap() # Clean up if session is gone - return - - # Get unanalyzed pages from this hot session - # A page is a dict: {"user_input": ..., "agent_response": ..., "timestamp": ..., "analyzed": False, ...} - unanalyzed_pages = [p for p in session.get("details", []) if not p.get("analyzed", False)] - - if unanalyzed_pages: - print(f"Memoryos: Mid-term session {sid} heat ({current_heat:.2f}) exceeded threshold. 
Analyzing {len(unanalyzed_pages)} pages for profile/knowledge update.") - - # Perform user profile analysis and knowledge extraction separately - # First call: User profile analysis - new_user_profile_text = gpt_user_profile_analysis(unanalyzed_pages, self.client, model=self.llm_model) - - # Second call: Knowledge extraction (user private data and assistant knowledge) - knowledge_result = gpt_knowledge_extraction(unanalyzed_pages, self.client, model=self.llm_model) - new_user_private_knowledge = knowledge_result.get("private") - new_assistant_knowledge = knowledge_result.get("assistant_knowledge") - - # Update User Profile in user's LTM - if new_user_profile_text and new_user_profile_text.lower() != "none": - old_profile = self.user_long_term_memory.get_raw_user_profile(self.user_id) - if old_profile and old_profile.lower() != "none": - updated_profile = gpt_update_profile(old_profile, new_user_profile_text, self.client, model=self.llm_model) - else: - updated_profile = new_user_profile_text - self.user_long_term_memory.update_user_profile(self.user_id, updated_profile, merge=False) # Don't merge, replace with latest - - # Add User Private Knowledge to user's LTM - if new_user_private_knowledge and new_user_private_knowledge.lower() != "none": - for line in new_user_private_knowledge.split('\n'): - if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: - self.user_long_term_memory.add_user_knowledge(line.strip()) - - # Add Assistant Knowledge to assistant's LTM - if new_assistant_knowledge and new_assistant_knowledge.lower() != "none": - for line in new_assistant_knowledge.split('\n'): - if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: - self.assistant_long_term_memory.add_assistant_knowledge(line.strip()) # Save to dedicated assistant LTM - - # Mark pages as analyzed and reset session heat contributors - for p in session["details"]: - p["analyzed"] = True # Mark all pages in session, or just unanalyzed_pages? - # Original code marked all pages in session - - session["N_visit"] = 0 # Reset visits after analysis - session["L_interaction"] = 0 # Reset interaction length contribution - # session["R_recency"] = 1.0 # Recency will re-calculate naturally - session["H_segment"] = compute_segment_heat(session) # Recompute heat with reset factors - session["last_visit_time"] = get_timestamp() # Update last visit time - - self.mid_term_memory.rebuild_heap() # Heap needs rebuild due to H_segment change - self.mid_term_memory.save() - print(f"Memoryos: Profile/Knowledge update for session {sid} complete. Heat reset.") - else: - print(f"Memoryos: Hot session {sid} has no unanalyzed pages. Skipping profile update.") - else: - # print(f"Memoryos: Top session {sid} heat ({current_heat:.2f}) below threshold. No profile update.") - pass # No action if below threshold - - def add_memory(self, user_input: str, agent_response: str, timestamp: str = None, meta_data: dict = None): - """ - Adds a new QA pair (memory) to the system. - meta_data is not used in the current refactoring but kept for future use. - """ - if not timestamp: - timestamp = get_timestamp() - - qa_pair = { - "user_input": user_input, - "agent_response": agent_response, - "timestamp": timestamp - # meta_data can be added here if it needs to be stored with the QA pair - } - self.short_term_memory.add_qa_pair(qa_pair) - print(f"Memoryos: Added QA to short-term. User: {user_input[:30]}...") - - if self.short_term_memory.is_full(): - print("Memoryos: Short-term memory full. 
Processing to mid-term.") - self.updater.process_short_term_to_mid_term() - - # After any memory addition that might impact mid-term, check for profile updates - self._trigger_profile_and_knowledge_update_if_needed() - - def get_response(self, query: str, relationship_with_user="friend", style_hint="", user_conversation_meta_data: dict = None) -> str: - """ - Generates a response to the user's query, incorporating memory and context. - """ - print(f"Memoryos: Generating response for query: '{query[:50]}...'") - - # 1. Retrieve context - retrieval_results = self.retriever.retrieve_context( - user_query=query, - user_id=self.user_id - # Using default thresholds from Retriever class for now - ) - retrieved_pages = retrieval_results["retrieved_pages"] - retrieved_user_knowledge = retrieval_results["retrieved_user_knowledge"] - retrieved_assistant_knowledge = retrieval_results["retrieved_assistant_knowledge"] - - # 2. Get short-term history - short_term_history = self.short_term_memory.get_all() - history_text = "\n".join([ - f"User: {qa.get('user_input', '')}\nAssistant: {qa.get('agent_response', '')} (Time: {qa.get('timestamp', '')})" - for qa in short_term_history - ]) - - # 3. Format retrieved mid-term pages (retrieval_queue equivalent) - retrieval_text = "\n".join([ - f"【Historical Memory】\nUser: {page.get('user_input', '')}\nAssistant: {page.get('agent_response', '')}\nTime: {page.get('timestamp', '')}\nConversation chain overview: {page.get('meta_info','N/A')}" - for page in retrieved_pages - ]) - - # 4. Get user profile - user_profile_text = self.user_long_term_memory.get_raw_user_profile(self.user_id) - if not user_profile_text or user_profile_text.lower() == "none": - user_profile_text = "No detailed profile available yet." - - # 5. Format retrieved user knowledge for background - user_knowledge_background = "" - if retrieved_user_knowledge: - user_knowledge_background = "\n【Relevant User Knowledge Entries】\n" - for kn_entry in retrieved_user_knowledge: - user_knowledge_background += f"- {kn_entry['knowledge']} (Recorded: {kn_entry['timestamp']})\n" - - background_context = f"【User Profile】\n{user_profile_text}\n{user_knowledge_background}" - - # 6. Format retrieved Assistant Knowledge (from assistant's LTM) - # Use retrieved assistant knowledge instead of all assistant knowledge - assistant_knowledge_text_for_prompt = "【Assistant Knowledge Base】\n" - if retrieved_assistant_knowledge: - for ak_entry in retrieved_assistant_knowledge: - assistant_knowledge_text_for_prompt += f"- {ak_entry['knowledge']} (Recorded: {ak_entry['timestamp']})\n" - else: - assistant_knowledge_text_for_prompt += "- No relevant assistant knowledge found for this query.\n" - - # 7. Format user_conversation_meta_data (if provided) - meta_data_text_for_prompt = "【Current Conversation Metadata】\n" - if user_conversation_meta_data: - try: - meta_data_text_for_prompt += json.dumps(user_conversation_meta_data, ensure_ascii=False, indent=2) - except TypeError: - meta_data_text_for_prompt += str(user_conversation_meta_data) - else: - meta_data_text_for_prompt += "None provided for this turn." - - # 8. 
Construct Prompts - system_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT.format( - relationship=relationship_with_user, - assistant_knowledge_text=assistant_knowledge_text_for_prompt, - meta_data_text=meta_data_text_for_prompt # Using meta_data_text placeholder for user_conversation_meta_data - ) - - user_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_USER_PROMPT.format( - history_text=history_text, - retrieval_text=retrieval_text, - background=background_context, - relationship=relationship_with_user, - query=query - ) - - messages = [ - {"role": "system", "content": system_prompt_text}, - {"role": "user", "content": user_prompt_text} - ] - - # 9. Call LLM for response - print("Memoryos: Calling LLM for final response generation...") - # print("System Prompt:\n", system_prompt_text) - # print("User Prompt:\n", user_prompt_text) - response_content = self.client.chat_completion( - model=self.llm_model, - messages=messages, - temperature=0.7, - max_tokens=1500 # As in original main - ) - - # 10. Add this interaction to memory - self.add_memory(user_input=query, agent_response=response_content, timestamp=get_timestamp()) - - return response_content - - # --- Helper/Maintenance methods (optional additions) --- - def get_user_profile_summary(self) -> str: - return self.user_long_term_memory.get_raw_user_profile(self.user_id) - - def get_assistant_knowledge_summary(self) -> list: - return self.assistant_long_term_memory.get_assistant_knowledge() - - def force_mid_term_analysis(self): - """Forces analysis of all unanalyzed pages in the hottest mid-term segment if heat is above 0. - Useful for testing or manual triggering. - """ - original_threshold = self.mid_term_heat_threshold - self.mid_term_heat_threshold = 0.0 # Temporarily lower threshold - print("Memoryos: Force-triggering mid-term analysis...") - self._trigger_profile_and_knowledge_update_if_needed() - self.mid_term_heat_threshold = original_threshold # Restore original threshold - - def __repr__(self): - return f"" \ No newline at end of file diff --git a/memoryos-mcp/memoryos/mid_term.py b/memoryos-mcp/memoryos/mid_term.py deleted file mode 100644 index 68ecc4d..0000000 --- a/memoryos-mcp/memoryos/mid_term.py +++ /dev/null @@ -1,324 +0,0 @@ -import json -import numpy as np -from collections import defaultdict -import faiss -import heapq -from datetime import datetime - -from utils import ( - get_timestamp, generate_id, get_embedding, normalize_vector, - llm_extract_keywords, compute_time_decay, ensure_directory_exists, OpenAIClient -) - -# Heat computation constants (can be tuned or made configurable) -HEAT_ALPHA = 1.0 -HEAT_BETA = 1.0 -HEAT_GAMMA = 1 -RECENCY_TAU_HOURS = 24 # For R_recency calculation in compute_segment_heat - -def compute_segment_heat(session, alpha=HEAT_ALPHA, beta=HEAT_BETA, gamma=HEAT_GAMMA, tau_hours=RECENCY_TAU_HOURS): - N_visit = session.get("N_visit", 0) - L_interaction = session.get("L_interaction", 0) - - # Calculate recency based on last_visit_time - R_recency = 1.0 # Default if no last_visit_time - if session.get("last_visit_time"): - R_recency = compute_time_decay(session["last_visit_time"], get_timestamp(), tau_hours) - - session["R_recency"] = R_recency # Update session's recency factor - return alpha * N_visit + beta * L_interaction + gamma * R_recency - -class MidTermMemory: - def __init__(self, file_path: str, client: OpenAIClient, max_capacity=2000): - self.file_path = file_path - ensure_directory_exists(self.file_path) - self.client = client - self.max_capacity = max_capacity - 
self.sessions = {} # {session_id: session_object} - self.access_frequency = defaultdict(int) # {session_id: access_count_for_lfu} - self.heap = [] # Min-heap storing (-H_segment, session_id) for hottest segments - self.load() - - def get_page_by_id(self, page_id): - for session in self.sessions.values(): - for page in session.get("details", []): - if page.get("page_id") == page_id: - return page - return None - - def update_page_connections(self, prev_page_id, next_page_id): - if prev_page_id: - prev_page = self.get_page_by_id(prev_page_id) - if prev_page: - prev_page["next_page"] = next_page_id - if next_page_id: - next_page = self.get_page_by_id(next_page_id) - if next_page: - next_page["pre_page"] = prev_page_id - # self.save() # Avoid saving on every minor update; save at higher level operations - - def evict_lfu(self): - if not self.access_frequency or not self.sessions: - return - - lfu_sid = min(self.access_frequency, key=self.access_frequency.get) - print(f"MidTermMemory: LFU eviction. Session {lfu_sid} has lowest access frequency.") - - if lfu_sid not in self.sessions: - del self.access_frequency[lfu_sid] # Clean up access frequency if session already gone - self.rebuild_heap() - return - - session_to_delete = self.sessions.pop(lfu_sid) # Remove from sessions - del self.access_frequency[lfu_sid] # Remove from LFU tracking - - # Clean up page connections if this session's pages were linked - for page in session_to_delete.get("details", []): - prev_page_id = page.get("pre_page") - next_page_id = page.get("next_page") - # If a page from this session was linked to an external page, nullify the external link - if prev_page_id and not self.get_page_by_id(prev_page_id): # Check if prev page is still in memory - # This case should ideally not happen if connections are within sessions or handled carefully - pass - if next_page_id and not self.get_page_by_id(next_page_id): - pass - # More robustly, one might need to search all other sessions if inter-session linking was allowed - # For now, assuming internal consistency or that MemoryOS class manages higher-level links - - self.rebuild_heap() - self.save() - print(f"MidTermMemory: Evicted session {lfu_sid}.") - - def add_session(self, summary, details): - session_id = generate_id("session") - summary_vec = get_embedding(summary) - summary_vec = normalize_vector(summary_vec).tolist() - summary_keywords = list(llm_extract_keywords(summary, client=self.client)) - - processed_details = [] - for page_data in details: - page_id = page_data.get("page_id", generate_id("page")) - full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" - inp_vec = get_embedding(full_text) - inp_vec = normalize_vector(inp_vec).tolist() - page_keywords = list(llm_extract_keywords(full_text, client=self.client)) - - processed_page = { - **page_data, # Carry over existing fields like user_input, agent_response, timestamp - "page_id": page_id, - "page_embedding": inp_vec, - "page_keywords": page_keywords, - "preloaded": page_data.get("preloaded", False), # Preserve if passed - "analyzed": page_data.get("analyzed", False), # Preserve if passed - # pre_page, next_page, meta_info are handled by DynamicUpdater - } - processed_details.append(processed_page) - - current_ts = get_timestamp() - session_obj = { - "id": session_id, - "summary": summary, - "summary_keywords": summary_keywords, - "summary_embedding": summary_vec, - "details": processed_details, - "L_interaction": len(processed_details), - "R_recency": 1.0, # Initial 
recency - "N_visit": 0, - "H_segment": 0.0, # Initial heat, will be computed - "timestamp": current_ts, # Creation timestamp - "last_visit_time": current_ts, # Also initial last_visit_time for recency calc - "access_count_lfu": 0 # For LFU eviction policy - } - session_obj["H_segment"] = compute_segment_heat(session_obj) - self.sessions[session_id] = session_obj - self.access_frequency[session_id] = 0 # Initialize for LFU - heapq.heappush(self.heap, (-session_obj["H_segment"], session_id)) # Use negative heat for max-heap behavior - - print(f"MidTermMemory: Added new session {session_id}. Initial heat: {session_obj['H_segment']:.2f}.") - if len(self.sessions) > self.max_capacity: - self.evict_lfu() - self.save() - return session_id - - def rebuild_heap(self): - self.heap = [] - for sid, session_data in self.sessions.items(): - # Ensure H_segment is up-to-date before rebuilding heap if necessary - # session_data["H_segment"] = compute_segment_heat(session_data) - heapq.heappush(self.heap, (-session_data["H_segment"], sid)) - # heapq.heapify(self.heap) # Not needed if pushing one by one - # No save here, it's an internal operation often followed by other ops that save - - def insert_pages_into_session(self, summary_for_new_pages, keywords_for_new_pages, pages_to_insert, - similarity_threshold=0.6, keyword_similarity_alpha=1.0): - if not self.sessions: # If no existing sessions, just add as a new one - print("MidTermMemory: No existing sessions. Adding new session directly.") - return self.add_session(summary_for_new_pages, pages_to_insert) - - new_summary_vec = get_embedding(summary_for_new_pages) - new_summary_vec = normalize_vector(new_summary_vec) - - best_sid = None - best_overall_score = -1 - - for sid, existing_session in self.sessions.items(): - existing_summary_vec = np.array(existing_session["summary_embedding"], dtype=np.float32) - semantic_sim = float(np.dot(existing_summary_vec, new_summary_vec)) - - # Keyword similarity (Jaccard index based) - existing_keywords = set(existing_session.get("summary_keywords", [])) - new_keywords_set = set(keywords_for_new_pages) - s_topic_keywords = 0 - if existing_keywords and new_keywords_set: - intersection = len(existing_keywords.intersection(new_keywords_set)) - union = len(existing_keywords.union(new_keywords_set)) - if union > 0: - s_topic_keywords = intersection / union - - overall_score = semantic_sim + keyword_similarity_alpha * s_topic_keywords - - if overall_score > best_overall_score: - best_overall_score = overall_score - best_sid = sid - - if best_sid and best_overall_score >= similarity_threshold: - print(f"MidTermMemory: Merging pages into session {best_sid}. 
Score: {best_overall_score:.2f} (Threshold: {similarity_threshold})") - target_session = self.sessions[best_sid] - - processed_new_pages = [] - for page_data in pages_to_insert: - page_id = page_data.get("page_id", generate_id("page")) # Use existing or generate new ID - full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" - inp_vec = get_embedding(full_text) - inp_vec = normalize_vector(inp_vec).tolist() - page_keywords_current = list(llm_extract_keywords(full_text, client=self.client)) - - processed_page = { - **page_data, # Carry over existing fields - "page_id": page_id, - "page_embedding": inp_vec, - "page_keywords": page_keywords_current, - # analyzed, preloaded flags should be part of page_data if set - } - target_session["details"].append(processed_page) - processed_new_pages.append(processed_page) - - target_session["L_interaction"] += len(pages_to_insert) - target_session["last_visit_time"] = get_timestamp() # Update last visit time on modification - target_session["H_segment"] = compute_segment_heat(target_session) - self.rebuild_heap() # Rebuild heap as heat has changed - self.save() - return best_sid - else: - print(f"MidTermMemory: No suitable session to merge (best score {best_overall_score:.2f} < threshold {similarity_threshold}). Creating new session.") - return self.add_session(summary_for_new_pages, pages_to_insert) - - def search_sessions(self, query_text, segment_similarity_threshold=0.1, page_similarity_threshold=0.1, - top_k_sessions=5, keyword_alpha=1.0, recency_tau_search=3600): - if not self.sessions: - return [] - - query_vec = get_embedding(query_text) - query_vec = normalize_vector(query_vec) - query_keywords = set(llm_extract_keywords(query_text, client=self.client)) - - candidate_sessions = [] - session_ids = list(self.sessions.keys()) - if not session_ids: return [] - - summary_embeddings_list = [self.sessions[s]["summary_embedding"] for s in session_ids] - summary_embeddings_np = np.array(summary_embeddings_list, dtype=np.float32) - - dim = summary_embeddings_np.shape[1] - index = faiss.IndexFlatIP(dim) # Inner product for similarity - index.add(summary_embeddings_np) - - query_arr_np = np.array([query_vec], dtype=np.float32) - distances, indices = index.search(query_arr_np, min(top_k_sessions, len(session_ids))) - - results = [] - current_time_str = get_timestamp() - - for i, idx in enumerate(indices[0]): - if idx == -1: continue - - session_id = session_ids[idx] - session = self.sessions[session_id] - semantic_sim_score = float(distances[0][i]) # This is the dot product - - # Keyword similarity for session summary - session_keywords = set(session.get("summary_keywords", [])) - s_topic_keywords = 0 - if query_keywords and session_keywords: - intersection = len(query_keywords.intersection(session_keywords)) - union = len(query_keywords.union(session_keywords)) - if union > 0: s_topic_keywords = intersection / union - - # Time decay for session recency in search scoring - # time_decay_factor = compute_time_decay(session["timestamp"], current_time_str, tau_hours=recency_tau_search) - - # Combined score for session relevance - session_relevance_score = (semantic_sim_score + keyword_alpha * s_topic_keywords) - - if session_relevance_score >= segment_similarity_threshold: - matched_pages_in_session = [] - for page in session.get("details", []): - page_embedding = np.array(page["page_embedding"], dtype=np.float32) - # page_keywords = set(page.get("page_keywords", [])) - - page_sim_score = 
float(np.dot(page_embedding, query_vec))
-                # Can also add keyword sim for pages if needed, but keeping it simpler for now
-
-                if page_sim_score >= page_similarity_threshold:
-                    matched_pages_in_session.append({"page_data": page, "score": page_sim_score})
-
-            if matched_pages_in_session:
-                # Update session access stats
-                session["N_visit"] += 1
-                session["last_visit_time"] = current_time_str
-                session["access_count_lfu"] = session.get("access_count_lfu", 0) + 1
-                self.access_frequency[session_id] = session["access_count_lfu"]
-                session["H_segment"] = compute_segment_heat(session)
-                self.rebuild_heap() # Heat changed
-
-                results.append({
-                    "session_id": session_id,
-                    "session_summary": session["summary"],
-                    "session_relevance_score": session_relevance_score,
-                    "matched_pages": sorted(matched_pages_in_session, key=lambda x: x["score"], reverse=True) # Sort pages by score
-                })
-
-        self.save() # Save changes from access updates
-        # Sort final results by session_relevance_score
-        return sorted(results, key=lambda x: x["session_relevance_score"], reverse=True)
-
-    def save(self):
-        # Make a copy for saving to avoid modifying heap during iteration if it happens
-        # Though current heap is list of tuples, so direct modification risk is low
-        # sessions_to_save = {sid: data for sid, data in self.sessions.items()}
-        data_to_save = {
-            "sessions": self.sessions,
-            "access_frequency": dict(self.access_frequency), # Convert defaultdict to dict for JSON
-            # Heap is derived, no need to save typically, but can if desired for faster load
-            # "heap_snapshot": self.heap
-        }
-        try:
-            with open(self.file_path, "w", encoding="utf-8") as f:
-                json.dump(data_to_save, f, ensure_ascii=False, indent=2)
-        except IOError as e:
-            print(f"Error saving MidTermMemory to {self.file_path}: {e}")
-
-    def load(self):
-        try:
-            with open(self.file_path, "r", encoding="utf-8") as f:
-                data = json.load(f)
-            self.sessions = data.get("sessions", {})
-            self.access_frequency = defaultdict(int, data.get("access_frequency", {}))
-            self.rebuild_heap() # Rebuild heap from loaded sessions
-            print(f"MidTermMemory: Loaded from {self.file_path}. Sessions: {len(self.sessions)}.")
-        except FileNotFoundError:
-            print(f"MidTermMemory: No history file found at {self.file_path}. Initializing new memory.")
-        except json.JSONDecodeError:
-            print(f"MidTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.")
-        except Exception as e:
-            print(f"MidTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.")
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/prompts.py b/memoryos-mcp/memoryos/prompts.py
deleted file mode 100644
index e7b2480..0000000
--- a/memoryos-mcp/memoryos/prompts.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""
-This file stores all the prompts used by the Memoryos system.
-"""
-
-# Prompt for generating system response (from main_memoybank.py, generate_system_response_with_meta)
-GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT = (
-    "As a communication expert with outstanding communication habits, you embody the role of {relationship} throughout the following dialogues.\n"
-    "Here are some of your distinctive personal traits and knowledge:\n{assistant_knowledge_text}\n"
-    "User's profile:\n"
-    "{meta_data_text}\n"
-    "Your task is to generate responses that align with these traits and maintain the tone.\n"
-)
-
-GENERATE_SYSTEM_RESPONSE_USER_PROMPT = (
-    "\n"
-    "Drawing from your recent conversation with the user:\n"
-    "{history_text}\n\n"
-    "\n"
-    "The memories linked to the ongoing conversation are:\n"
-    "{retrieval_text}\n\n"
-    "\n"
-    "During the conversation process between you and the user in the past, you found that the user has the following characteristics:\n"
-    "{background}\n\n"
-    "Now, please role-play as {relationship} to continue the dialogue between you and the user.\n"
-    "The user just said: {query}\n"
-    "Please respond to the user's statement using the following format (maximum 30 words, must be in English):\n "
-    "When answering questions, be sure to check whether the timestamp of the referenced information matches the timeframe of the question"
-)
-
-# Prompt for assistant knowledge extraction (from utils.py, analyze_assistant_knowledge)
-ASSISTANT_KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are an assistant knowledge extraction engine. Rules:
-1. Extract ONLY explicit statements about the assistant's identity or knowledge.
-2. Use concise and factual statements in the first person.
-3. If no relevant information is found, output "None"."""
-
-ASSISTANT_KNOWLEDGE_EXTRACTION_USER_PROMPT = """
-# Assistant Knowledge Extraction Task
-Analyze the conversation and extract any fact or identity traits about the assistant.
-If no traits can be extracted, reply with "None". Use the following format for output:
-The generated content should be as concise as possible — the more concise, the better.
-【Assistant Knowledge】
-- [Fact 1]
-- [Fact 2]
-- (Or "None" if none found)
-
-Few-shot examples:
-1. User: Can you recommend some movies.
-   AI: Yes, I recommend Interstellar.
-   Time: 2023-10-01
-   【Assistant Knowledge】
-   - I recommend Interstellar on 2023-10-01.
-
-2. User: Can you help me with cooking recipes?
-   AI: Yes, I have extensive knowledge of cooking recipes and techniques.
-   Time: 2023-10-02
-   【Assistant Knowledge】
-   - I have cooking recipes and techniques on 2023-10-02.
-
-3. User: That's interesting. I didn't know you could do that.
-   AI: I'm glad you find it interesting!
-   【Assistant Knowledge】
-   - None
-
-Conversation:
-{conversation}
-"""
-
-# Prompt for summarizing dialogs (from utils.py, gpt_summarize)
-SUMMARIZE_DIALOGS_SYSTEM_PROMPT = "You are an expert in summarizing dialogue topics. Generate extremely concise and precise summaries. Be as brief as possible while capturing the essence."
-SUMMARIZE_DIALOGS_USER_PROMPT = "Please generate a concise topic summary based on the following conversation. Keep it to 2-3 short sentences maximum:\n{dialog_text}\nConcise Summary:"
-
-# Prompt for multi-summary generation (from utils.py, gpt_generate_multi_summary)
-MULTI_SUMMARY_SYSTEM_PROMPT = "You are an expert in analyzing dialogue topics. Generate concise summaries. No more than two topics. Be as brief as possible."
-MULTI_SUMMARY_USER_PROMPT = ("Please analyze the following dialogue and generate extremely concise subtopic summaries (if applicable), with a maximum of two themes.\n" - "Each summary should be very brief - just a few words for the theme and content. Format as JSON array:\n" - "[\n {{\"theme\": \"Brief theme\", \"keywords\": [\"key1\", \"key2\"], \"content\": \"summary\"}}\n]\n" - "\nConversation content:\n{text}") - -# Prompt for personality analysis (NEW TEMPLATE) -PERSONALITY_ANALYSIS_SYSTEM_PROMPT = """You are a professional user preference analysis assistant. Your task is to analyze the user's personality preferences from the given dialogue based on the provided dimensions. - -For each dimension: -1. Carefully read the conversation and determine if the dimension is reflected. -2. If reflected, determine the user's preference level: High / Medium / Low, and briefly explain the reasoning, including time, people, and context if possible. -3. If the dimension is not reflected, do not extract or list it. - -Focus only on the user's preferences and traits for the personality analysis section. -Output only the user profile section. -""" - -PERSONALITY_ANALYSIS_USER_PROMPT = """Please analyze the latest user-AI conversation below based on the 90 personality preference dimensions. - -Here are the 90 dimensions and their explanations: - -[Psychological Model (Basic Needs & Personality)] -Extraversion: Preference for social activities. -Openness: Willingness to embrace new ideas and experiences. -Agreeableness: Tendency to be friendly and cooperative. -Conscientiousness: Responsibility and organizational ability. -Neuroticism: Emotional stability and sensitivity. -Physiological Needs: Concern for comfort and basic needs. -Need for Security: Emphasis on safety and stability. -Need for Belonging: Desire for group affiliation. -Need for Self-Esteem: Need for respect and recognition. -Cognitive Needs: Desire for knowledge and understanding. -Aesthetic Appreciation: Appreciation for beauty and art. -Self-Actualization: Pursuit of one's full potential. -Need for Order: Preference for cleanliness and organization. -Need for Autonomy: Preference for independent decision-making and action. -Need for Power: Desire to influence or control others. -Need for Achievement: Value placed on accomplishments. - -[AI Alignment Dimensions] -Helpfulness: Whether the AI's response is practically useful to the user. (This reflects user's expectation of AI) -Honesty: Whether the AI's response is truthful. (This reflects user's expectation of AI) -Safety: Avoidance of sensitive or harmful content. (This reflects user's expectation of AI) -Instruction Compliance: Strict adherence to user instructions. (This reflects user's expectation of AI) -Truthfulness: Accuracy and authenticity of content. (This reflects user's expectation of AI) -Coherence: Clarity and logical consistency of expression. (This reflects user's expectation of AI) -Complexity: Preference for detailed and complex information. -Conciseness: Preference for brief and clear responses. - -[Content Platform Interest Tags] -Science Interest: Interest in science topics. -Education Interest: Concern with education and learning. -Psychology Interest: Interest in psychology topics. -Family Concern: Interest in family and parenting. -Fashion Interest: Interest in fashion topics. -Art Interest: Engagement with or interest in art. -Health Concern: Concern with physical health and lifestyle. -Financial Management Interest: Interest in finance and budgeting. 
-Sports Interest: Interest in sports and physical activity. -Food Interest: Passion for cooking and cuisine. -Travel Interest: Interest in traveling and exploring new places. -Music Interest: Interest in music appreciation or creation. -Literature Interest: Interest in literature and reading. -Film Interest: Interest in movies and cinema. -Social Media Activity: Frequency and engagement with social media. -Tech Interest: Interest in technology and innovation. -Environmental Concern: Attention to environmental and sustainability issues. -History Interest: Interest in historical knowledge and topics. -Political Concern: Interest in political and social issues. -Religious Interest: Interest in religion and spirituality. -Gaming Interest: Enjoyment of video games or board games. -Animal Concern: Concern for animals or pets. -Emotional Expression: Preference for direct vs. restrained emotional expression. -Sense of Humor: Preference for humorous or serious communication style. -Information Density: Preference for detailed vs. concise information. -Language Style: Preference for formal vs. casual tone. -Practicality: Preference for practical advice vs. theoretical discussion. - -For each dimension that can be extracted from the conversation, list it in the following format: -Dimension ( Level(High / Medium / Low) ) -[Reasoning: Brief explanation including time, people, and context] -The reason for generation should be as brief as possible and highlight the key points. -Note: If a dimension cannot be inferred from the conversation, do not list it. - -Known User Traits (if any): -{known_user_traits} - -Latest User-AI Conversation: -{conversation} - -Please begin your analysis: -""" - -# Prompt for knowledge extraction (NEW) -KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are a knowledge extraction assistant. Your task is to extract user private data and assistant knowledge from conversations. - -Focus on: -1. User private data: personal information, preferences, or private facts about the user -2. Assistant knowledge: explicit statements about what the assistant did, provided, or demonstrated - -Be extremely concise and factual in your extractions. Use the shortest possible phrases. -""" - -KNOWLEDGE_EXTRACTION_USER_PROMPT = """Please extract user private data and assistant knowledge from the latest user-AI conversation below. - -Latest User-AI Conversation: -{conversation} - -【User Private Data】 -Extract personal information about the user. Be extremely concise - use shortest possible phrases: -- [Brief fact] -- [Brief fact] -- (If no private data found, write "None") - -【Assistant Knowledge】 -Extract what the assistant demonstrated. Use format "Assistant [action] at [time]". Be extremely brief: -- Assistant [brief action] at [time/context] -- Assistant [brief capability] during [brief context] -- (If no assistant knowledge found, write "None") - -Examples: -- Assistant recommended Interstellar on 2023-10-01 -- Assistant provided pasta recipe during cooking talk -- Assistant helped with Python code -- Assistant analyzed spreadsheet data -""" - -# Prompt for updating user profile (from utils.py, gpt_update_profile) -UPDATE_PROFILE_SYSTEM_PROMPT = "You are an expert in merging and updating user profiles. Integrate the new information into the old profile, maintaining consistency and improving the overall understanding of the user. Avoid redundancy. The new analysis is based on specific dimensions, try to incorporate these insights meaningfully." 
-UPDATE_PROFILE_USER_PROMPT = "Please update the following user profile based on the new analysis. If the old profile is empty or \"None\", create a new one based on the new analysis.\n\nOld User Profile:\n{old_profile}\n\nNew Analysis Data:\n{new_analysis}\n\nUpdated User Profile:" - -# Prompt for extracting theme (from utils.py, gpt_extract_theme) -EXTRACT_THEME_SYSTEM_PROMPT = "You are an expert in extracting the main theme from a text. Provide a concise theme." -EXTRACT_THEME_USER_PROMPT = "Please extract the main theme from the following text:\n{answer_text}\n\nTheme:" - -# Prompt for extracting keywords (from utils.py, llm_extract_keywords) -EXTRACT_KEYWORDS_SYSTEM_PROMPT = "You are an expert in keyword extraction. Extract only the most essential keywords from the text. Return 3-5 keywords maximum as a comma-separated list. Be extremely selective." -EXTRACT_KEYWORDS_USER_PROMPT = "Please extract the 3-5 most important keywords from the following text. Be very selective and concise:\n{text}\n\nKeywords:" - -# Prompt for conversation continuity check (from dynamic_update.py, _is_conversation_continuing) -CONTINUITY_CHECK_SYSTEM_PROMPT = "You are a conversation continuity detector. Return ONLY 'true' or 'false'." -CONTINUITY_CHECK_USER_PROMPT = ("Determine if these two conversation pages are continuous (true continuation without topic shift).\n" - "Return ONLY \"true\" or \"false\".\n\n" - "Previous Page:\nUser: {prev_user}\nAssistant: {prev_agent}\n\n" - "Current Page:\nUser: {curr_user}\nAssistant: {curr_agent}\n\n" - "Continuous?") - -# Prompt for generating meta info (from dynamic_update.py, _generate_meta_info) -META_INFO_SYSTEM_PROMPT = ("""You are a conversation meta-summary updater. Your task is to: -1. Preserve relevant context from previous meta-summary -2. Integrate new information from current dialogue -3. Output ONLY the updated summary (no explanations)""" ) -META_INFO_USER_PROMPT = ("""Update the conversation meta-summary by incorporating the new dialogue while maintaining continuity. - - Guidelines: - 1. Start from the previous meta-summary (if exists) - 2. Add/update information based on the new dialogue - 3. Keep it concise (1-2 sentences max) - 4. 
Maintain context coherence
-
-    Previous Meta-summary: {last_meta}
-    New Dialogue:
-    {new_dialogue}
-
-    Updated Meta-summary:""")
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/retriever.py b/memoryos-mcp/memoryos/retriever.py
deleted file mode 100644
index 9792cd0..0000000
--- a/memoryos-mcp/memoryos/retriever.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from collections import deque
-import heapq
-from utils import get_timestamp, OpenAIClient # OpenAIClient might not be directly used here but good for consistency
-from short_term import ShortTermMemory
-from mid_term import MidTermMemory
-from long_term import LongTermMemory
-# from .updater import Updater # Updater is not directly used by Retriever
-
-class Retriever:
-    def __init__(self,
-                 mid_term_memory: MidTermMemory,
-                 long_term_memory: LongTermMemory,
-                 assistant_long_term_memory: LongTermMemory = None, # Add assistant LTM
-                 # client: OpenAIClient, # Not strictly needed if all LLM calls are within memory modules
-                 queue_capacity=7): # Default from main_memoybank was 7 for retrieval_queue
-        # Short term memory is usually for direct context, not primary retrieval source here
-        # self.short_term_memory = short_term_memory
-        self.mid_term_memory = mid_term_memory
-        self.long_term_memory = long_term_memory
-        self.assistant_long_term_memory = assistant_long_term_memory # Store assistant LTM reference
-        # self.client = client
-        self.retrieval_queue_capacity = queue_capacity
-        # self.retrieval_queue = deque(maxlen=queue_capacity) # This was instance level, but retrieve returns it, so maybe not needed as instance var
-
-    def retrieve_context(self, user_query: str,
-                         user_id: str, # Needed for profile, can be used for context filtering if desired
-                         segment_similarity_threshold=0.1, # From main_memoybank example
-                         page_similarity_threshold=0.1, # From main_memoybank example
-                         knowledge_threshold=0.01, # From main_memoybank example
-                         top_k_sessions=5, # From MidTermMemory search default
-                         top_k_knowledge=20 # Default for knowledge search
-                         ):
-        print(f"Retriever: Starting retrieval for query: '{user_query[:50]}...'")
-
-        # 1. Retrieve from Mid-Term Memory
-        # MidTermMemory.search_sessions now takes client for its internal keyword extraction
-        # It also returns a more structured result including scores.
-        matched_sessions = self.mid_term_memory.search_sessions(
-            query_text=user_query,
-            segment_similarity_threshold=segment_similarity_threshold,
-            page_similarity_threshold=page_similarity_threshold,
-            top_k_sessions=top_k_sessions
-        )
-
-        # Use a heap to get top N pages across all relevant sessions based on their scores
-        top_pages_heap = []
-        page_counter = 0 # Add counter to ensure unique comparison
-        for session_match in matched_sessions:
-            for page_match in session_match.get("matched_pages", []):
-                page_data = page_match["page_data"]
-                page_score = page_match["score"] # Using the page score directly
-
-                # Add session relevance score to page score or combine them?
-                # For now, using page_score. Could be: page_score * session_match["session_relevance_score"]
-                combined_score = page_score # Potentially adjust with session_relevance_score
-
-                if len(top_pages_heap) < self.retrieval_queue_capacity:
-                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
-                    page_counter += 1
-                elif combined_score > top_pages_heap[0][0]: # If current page is better than the worst in heap
-                    heapq.heappop(top_pages_heap)
-                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
-                    page_counter += 1
-
-        # Extract pages from heap, already sorted by heapq property (smallest first)
-        # We want highest scores, so either use a max-heap or sort after popping from min-heap.
-        retrieved_mid_term_pages = [item[2] for item in sorted(top_pages_heap, key=lambda x: x[0], reverse=True)]
-        print(f"Retriever: Mid-term memory recalled {len(retrieved_mid_term_pages)} pages.")
-
-        # 2. Retrieve from Long-Term User Knowledge (specific to the user)
-        # Assuming LongTermMemory for a user stores their specific knowledge/private data.
-        # The main LongTermMemory class in `long_term.py` has `search_user_knowledge` which doesn't need user_id as it's implicit in the instance
-        # However, if a single LTM instance handles multiple users, it would need user_id.
-        # For the Memoryos class, LTM will be user-specific or assistant-specific.
-        retrieved_user_knowledge = self.long_term_memory.search_user_knowledge(
-            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
-        )
-        print(f"Retriever: Long-term user knowledge recalled {len(retrieved_user_knowledge)} items.")
-
-        # 3. Retrieve from Long-Term Assistant Knowledge (general for the assistant)
-        # This requires a separate LTM instance or a method in LTM that queries a different knowledge base.
-        # In our Memoryos structure, there will be a separate LTM for assistant.
-        # For now, assuming self.long_term_memory is the USER's LTM.
-        # The Memoryos class will handle passing the correct LTM instance for assistant knowledge.
-        # This function will just return what it can from the provided LTM.
-        # If assistant_ltm is passed, it can be used: self.assistant_long_term_memory.search_assistant_knowledge(...)
retrieved_assistant_knowledge = []
-        if self.assistant_long_term_memory:
-            retrieved_assistant_knowledge = self.assistant_long_term_memory.search_assistant_knowledge(
-                user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
-            )
-            print(f"Retriever: Long-term assistant knowledge recalled {len(retrieved_assistant_knowledge)} items.")
-        else:
-            print("Retriever: No assistant long-term memory provided, skipping assistant knowledge retrieval.")
-
-        return {
-            "retrieved_pages": retrieved_mid_term_pages, # List of page dicts
-            "retrieved_user_knowledge": retrieved_user_knowledge, # List of knowledge entry dicts
-            "retrieved_assistant_knowledge": retrieved_assistant_knowledge, # List of assistant knowledge entry dicts
-            "retrieved_at": get_timestamp()
-        }
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/short_term.py b/memoryos-mcp/memoryos/short_term.py
deleted file mode 100644
index c6ab7d7..0000000
--- a/memoryos-mcp/memoryos/short_term.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import json
-from collections import deque
-from utils import get_timestamp, ensure_directory_exists
-
-class ShortTermMemory:
-    def __init__(self, file_path, max_capacity=10):
-        self.max_capacity = max_capacity
-        self.file_path = file_path
-        ensure_directory_exists(self.file_path)
-        self.memory = deque(maxlen=max_capacity)
-        self.load()
-
-    def add_qa_pair(self, qa_pair):
-        # Ensure timestamp exists, add if not
-        if 'timestamp' not in qa_pair or not qa_pair['timestamp']:
-            qa_pair["timestamp"] = get_timestamp()
-
-        self.memory.append(qa_pair)
-        print(f"ShortTermMemory: Added QA. User: {qa_pair.get('user_input','')[:30]}...")
-        self.save()
-
-    def get_all(self):
-        return list(self.memory)
-
-    def is_full(self):
-        return len(self.memory) >= self.max_capacity # Use >= to be safe
-
-    def pop_oldest(self):
-        if self.memory:
-            msg = self.memory.popleft()
-            print("ShortTermMemory: Evicted oldest QA pair.")
-            self.save()
-            return msg
-        return None
-
-    def save(self):
-        try:
-            with open(self.file_path, "w", encoding="utf-8") as f:
-                json.dump(list(self.memory), f, ensure_ascii=False, indent=2)
-        except IOError as e:
-            print(f"Error saving ShortTermMemory to {self.file_path}: {e}")
-
-    def load(self):
-        try:
-            with open(self.file_path, "r", encoding="utf-8") as f:
-                data = json.load(f)
-            # Ensure items are loaded correctly, especially if file was empty or malformed
-            if isinstance(data, list):
-                self.memory = deque(data, maxlen=self.max_capacity)
-            else:
-                self.memory = deque(maxlen=self.max_capacity)
-            print(f"ShortTermMemory: Loaded from {self.file_path}.")
-        except FileNotFoundError:
-            self.memory = deque(maxlen=self.max_capacity)
-            print(f"ShortTermMemory: No history file found at {self.file_path}. Initializing new memory.")
-        except json.JSONDecodeError:
-            self.memory = deque(maxlen=self.max_capacity)
-            print(f"ShortTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.")
-        except Exception as e:
-            self.memory = deque(maxlen=self.max_capacity)
-            print(f"ShortTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. 
Initializing new memory.")
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/updater.py b/memoryos-mcp/memoryos/updater.py
deleted file mode 100644
index b8b6e70..0000000
--- a/memoryos-mcp/memoryos/updater.py
+++ /dev/null
@@ -1,199 +0,0 @@
-from utils import (
-    generate_id, get_timestamp,
-    gpt_generate_multi_summary, gpt_update_profile,  # gpt_update_profile is needed by update_long_term_from_analysis below
-    check_conversation_continuity, generate_page_meta_info, OpenAIClient,
-    llm_extract_keywords
-)
-from short_term import ShortTermMemory
-from mid_term import MidTermMemory
-from long_term import LongTermMemory
-
-class Updater:
-    def __init__(self,
-                 short_term_memory: ShortTermMemory,
-                 mid_term_memory: MidTermMemory,
-                 long_term_memory: LongTermMemory,
-                 client: OpenAIClient,
-                 topic_similarity_threshold=0.5,
-                 llm_model="gpt-4o-mini"):
-        self.short_term_memory = short_term_memory
-        self.mid_term_memory = mid_term_memory
-        self.long_term_memory = long_term_memory
-        self.client = client
-        self.topic_similarity_threshold = topic_similarity_threshold
-        self.last_evicted_page_for_continuity = None # Tracks the actual last page object for continuity checks
-        self.llm_model = llm_model
-
-    def _update_linked_pages_meta_info(self, start_page_id, new_meta_info):
-        """
-        Updates meta_info for a chain of connected pages starting from start_page_id.
-        This is a simplified version. Assumes that once a chain is broken (no pre_page),
-        we don't need to go further back. Updates forward as well.
-        """
-        # Breadth-first walk over the chain in both directions (pre_page and next_page links).
-        q = [start_page_id]
-        visited = {start_page_id}
-
-        head = 0
-        while head < len(q):
-            current_page_id = q[head]
-            head += 1
-            page = self.mid_term_memory.get_page_by_id(current_page_id)
-            if page:
-                page["meta_info"] = new_meta_info
-                # Check previous page
-                prev_id = page.get("pre_page")
-                if prev_id and prev_id not in visited:
-                    q.append(prev_id)
-                    visited.add(prev_id)
-                # Check next page
-                next_id = page.get("next_page")
-                if next_id and next_id not in visited:
-                    q.append(next_id)
-                    visited.add(next_id)
-        if q: # If any pages were updated
-            self.mid_term_memory.save() # Save mid-term memory after updates
-
-    def process_short_term_to_mid_term(self):
-        evicted_qas = []
-        while self.short_term_memory.is_full():
-            qa = self.short_term_memory.pop_oldest()
-            if qa and qa.get("user_input") and qa.get("agent_response"):
-                evicted_qas.append(qa)
-
-        if not evicted_qas:
-            print("Updater: No QAs evicted from short-term memory.")
-            return
-
-        print(f"Updater: Processing {len(evicted_qas)} QAs from short-term to mid-term.")
-
-        # 1. Create page structures and handle continuity within the evicted batch
-        current_batch_pages = []
-        temp_last_page_in_batch = self.last_evicted_page_for_continuity # Carry over from previous batch if any
-
-        for qa_pair in evicted_qas:
-            current_page_obj = {
-                "page_id": generate_id("page"),
-                "user_input": qa_pair.get("user_input", ""),
-                "agent_response": qa_pair.get("agent_response", ""),
-                "timestamp": qa_pair.get("timestamp", get_timestamp()),
-                "preloaded": False, # Default for new pages from short-term
-                "analyzed": False, # Default for new pages from short-term
-                "pre_page": None,
-                "next_page": None,
-                "meta_info": None
-            }
-
-            is_continuous = check_conversation_continuity(temp_last_page_in_batch, current_page_obj, self.client, model=self.llm_model)
-
-            if is_continuous and temp_last_page_in_batch:
-                current_page_obj["pre_page"] = temp_last_page_in_batch["page_id"]
-                # The actual next_page for temp_last_page_in_batch will be set when it's stored
-                # in mid-term; if it's already there, it needs an update. This linking is tricky.
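The is_continuous flag above rests on a strict string contract with the LLM: the utils helper later in this patch parses the model's reply with response.strip().lower() == "true". A tiny sketch of that gate in isolation (is_continuous_reply is an illustrative name, not part of the module):

    def is_continuous_reply(raw_reply: str) -> bool:
        # Anything other than the exact token "true" (case/whitespace-insensitive)
        # is treated as "start a new conversation chain".
        return raw_reply.strip().lower() == "true"

    assert is_continuous_reply(" True\n") is True
    assert is_continuous_reply("false") is False
    assert is_continuous_reply("probably") is False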
- # For now, we establish the link from current to previous. - # MidTermMemory's update_page_connections can fix the other side if pages are already there. - - # Meta info generation based on continuity - last_meta = temp_last_page_in_batch.get("meta_info") - new_meta = generate_page_meta_info(last_meta, current_page_obj, self.client, model=self.llm_model) - current_page_obj["meta_info"] = new_meta - # If temp_last_page_in_batch was part of a chain, its meta_info and subsequent ones should update. - # This implies that meta_info should perhaps be updated more globally or propagated. - # For now, new_meta applies to current_page_obj and potentially its chain. - # We can call _update_linked_pages_meta_info if temp_last_page_in_batch is in mid-term already. - if temp_last_page_in_batch.get("page_id") and self.mid_term_memory.get_page_by_id(temp_last_page_in_batch["page_id"]): - self._update_linked_pages_meta_info(temp_last_page_in_batch["page_id"], new_meta) - else: - # Start of a new chain or no previous page - current_page_obj["meta_info"] = generate_page_meta_info(None, current_page_obj, self.client, model=self.llm_model) - - current_batch_pages.append(current_page_obj) - temp_last_page_in_batch = current_page_obj # Update for the next iteration in this batch - - # Update the global last evicted page for the next run of this method - if current_batch_pages: - self.last_evicted_page_for_continuity = current_batch_pages[-1] - - # 2. Consolidate text from current_batch_pages for multi-summary - if not current_batch_pages: - return - - input_text_for_summary = "\n".join([ - f"User: {p.get('user_input','')}\nAssistant: {p.get('agent_response','')}" - for p in current_batch_pages - ]) - - print("Updater: Generating multi-topic summary for the evicted batch...") - multi_summary_result = gpt_generate_multi_summary(input_text_for_summary, self.client, model=self.llm_model) - - # 3. Insert pages into MidTermMemory based on summaries - if multi_summary_result and multi_summary_result.get("summaries"): - for summary_item in multi_summary_result["summaries"]: - theme_summary = summary_item.get("content", "General summary of recent interactions.") - theme_keywords = summary_item.get("keywords", []) - print(f"Updater: Processing theme '{summary_item.get('theme')}' for mid-term insertion.") - - # Pass the already processed pages (with IDs, embeddings to be added by MidTermMemory if not present) - self.mid_term_memory.insert_pages_into_session( - summary_for_new_pages=theme_summary, - keywords_for_new_pages=theme_keywords, - pages_to_insert=current_batch_pages, # These pages now have pre_page, next_page, meta_info set up - similarity_threshold=self.topic_similarity_threshold - ) - else: - # Fallback: if no summaries, add as one session or handle as a single block - print("Updater: No specific themes from multi-summary. Adding batch as a general session.") - fallback_summary = "General conversation segment from short-term memory." - fallback_keywords = llm_extract_keywords(input_text_for_summary, self.client, model=self.llm_model) if input_text_for_summary else [] - self.mid_term_memory.insert_pages_into_session( - summary_for_new_pages=fallback_summary, - keywords_for_new_pages=list(fallback_keywords), - pages_to_insert=current_batch_pages, - similarity_threshold=self.topic_similarity_threshold - ) - - # After pages are in mid-term, ensure their connections are doubly linked if needed. 
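A minimal sketch of the two-sided wiring that "doubly linked" refers to, using the page-dict keys defined above; link_pages is a hypothetical helper, not part of MidTermMemory:

    def link_pages(prev_page: dict, curr_page: dict) -> None:
        # Set both directions so a chain walk over pre_page/next_page works either way.
        curr_page["pre_page"] = prev_page["page_id"]
        prev_page["next_page"] = curr_page["page_id"]

    a = {"page_id": "page_a", "pre_page": None, "next_page": None}
    b = {"page_id": "page_b", "pre_page": None, "next_page": None}
    link_pages(a, b)
    assert a["next_page"] == "page_b" and b["pre_page"] == "page_a"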
- # MidTermMemory.insert_pages_into_session should ideally handle this internally - # or we might need a separate pass to solidify connections after all insertions. - for page in current_batch_pages: - if page.get("pre_page"): - self.mid_term_memory.update_page_connections(page["pre_page"], page["page_id"]) - if page.get("next_page"): - self.mid_term_memory.update_page_connections(page["page_id"], page["next_page"]) # This seems redundant if next is set by prior - if current_batch_pages: # Save if any pages were processed - self.mid_term_memory.save() - - def update_long_term_from_analysis(self, user_id, profile_analysis_result): - """ - Updates long-term memory based on the results of a personality/knowledge analysis. - profile_analysis_result is expected to be a dict with keys like "profile", "private", "assistant_knowledge". - """ - if not profile_analysis_result: - print("Updater: No analysis result provided for long-term update.") - return - - new_profile_text = profile_analysis_result.get("profile") - if new_profile_text and new_profile_text.lower() != "none": - print(f"Updater: Updating user profile for {user_id} in LongTermMemory.") - current_profile = self.long_term_memory.get_raw_user_profile(user_id) - if current_profile and current_profile.lower() != "none": - updated_profile = gpt_update_profile(current_profile, new_profile_text, self.client) - else: - updated_profile = new_profile_text # First profile - self.long_term_memory.update_user_profile(user_id, updated_profile) - - user_private_knowledge = profile_analysis_result.get("private") - if user_private_knowledge and user_private_knowledge.lower() != "none": - print(f"Updater: Adding user private knowledge for {user_id} to LongTermMemory.") - # Split if multiple lines, assuming each line is a distinct piece of knowledge - for line in user_private_knowledge.split('\n'): - if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: - self.long_term_memory.add_user_knowledge(line.strip()) - - assistant_knowledge_text = profile_analysis_result.get("assistant_knowledge") - if assistant_knowledge_text and assistant_knowledge_text.lower() != "none": - print("Updater: Adding assistant knowledge to LongTermMemory.") - for line in assistant_knowledge_text.split('\n'): - if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: - self.long_term_memory.add_assistant_knowledge(line.strip()) - - # LongTermMemory.save() is called by its add/update methods \ No newline at end of file diff --git a/memoryos-mcp/memoryos/utils.py b/memoryos-mcp/memoryos/utils.py deleted file mode 100644 index 2bacbc4..0000000 --- a/memoryos-mcp/memoryos/utils.py +++ /dev/null @@ -1,227 +0,0 @@ -import time -import uuid -import openai -import numpy as np -from sentence_transformers import SentenceTransformer -import json -import os -import prompts # Assuming prompts.py is in the same directory -from openai import OpenAI -# ---- OpenAI Client ---- -class OpenAIClient: - def __init__(self, api_key, base_url=None): - self.api_key = api_key - self.base_url = base_url if base_url else "https://api.openai.com/v1" - # The openai library looks for OPENAI_API_KEY and OPENAI_BASE_URL env vars by default - # or they can be passed directly to the client. - # For simplicity and explicit control, we'll pass them to the client constructor. - self.client = OpenAI(api_key=self.api_key, base_url=self.base_url) - - def chat_completion(self, model, messages, temperature=0.7, max_tokens=2000): - print(f"Calling OpenAI API. 
Model: {model}") - try: - response = self.client.chat.completions.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens - ) - return response.choices[0].message.content.strip() - except Exception as e: - print(f"Error calling OpenAI API: {e}") - # Fallback or error handling - return "Error: Could not get response from LLM." - - -# ---- Basic Utilities ---- -def get_timestamp(): - return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) - -def generate_id(prefix="id"): - return f"{prefix}_{uuid.uuid4().hex[:8]}" - -def ensure_directory_exists(path): - os.makedirs(os.path.dirname(path), exist_ok=True) - -# ---- Embedding Utilities ---- -_model_cache = {} - -def get_embedding(text, model_name="all-MiniLM-L6-v2"): - if model_name not in _model_cache: - print(f"Loading sentence transformer model: {model_name}") - _model_cache[model_name] = SentenceTransformer(model_name) - model = _model_cache[model_name] - embedding = model.encode([text], convert_to_numpy=True)[0] - return embedding - -def normalize_vector(vec): - vec = np.array(vec, dtype=np.float32) - norm = np.linalg.norm(vec) - if norm == 0: - return vec - return vec / norm - -# ---- Time Decay Function ---- -def compute_time_decay(event_timestamp_str, current_timestamp_str, tau_hours=24): - from datetime import datetime - fmt = "%Y-%m-%d %H:%M:%S" - try: - t_event = datetime.strptime(event_timestamp_str, fmt) - t_current = datetime.strptime(current_timestamp_str, fmt) - delta_hours = (t_current - t_event).total_seconds() / 3600.0 - return np.exp(-delta_hours / tau_hours) - except ValueError: # Handle cases where timestamp might be invalid - return 0.1 # Default low recency - - -# ---- LLM-based Utility Functions ---- - -def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"): - dialog_text = "\n".join([f"User: {d.get('user_input','')} Assistant: {d.get('agent_response','')}" for d in dialogs]) - messages = [ - {"role": "system", "content": prompts.SUMMARIZE_DIALOGS_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.SUMMARIZE_DIALOGS_USER_PROMPT.format(dialog_text=dialog_text)} - ] - print("Calling LLM to generate topic summary...") - return client.chat_completion(model=model, messages=messages) - -def gpt_generate_multi_summary(text, client: OpenAIClient, model="gpt-4o-mini"): - messages = [ - {"role": "system", "content": prompts.MULTI_SUMMARY_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.MULTI_SUMMARY_USER_PROMPT.format(text=text)} - ] - print("Calling LLM to generate multi-topic summary...") - response_text = client.chat_completion(model=model, messages=messages) - try: - summaries = json.loads(response_text) - except json.JSONDecodeError: - print(f"Warning: Could not parse multi-summary JSON: {response_text}") - summaries = [] # Return empty list or a default structure - return {"input": text, "summaries": summaries} - - -def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", known_user_traits="None"): - """Analyze user personality profile from dialogs""" - conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs]) - messages = [ - {"role": "system", "content": prompts.PERSONALITY_ANALYSIS_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.PERSONALITY_ANALYSIS_USER_PROMPT.format( - conversation=conversation, - known_user_traits=known_user_traits - )} - ] - print("Calling LLM for user profile 
analysis...") - result_text = client.chat_completion(model=model, messages=messages) - return result_text.strip() if result_text else "None" - - -def gpt_knowledge_extraction(dialogs, client: OpenAIClient, model="gpt-4o-mini"): - """Extract user private data and assistant knowledge from dialogs""" - conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs]) - messages = [ - {"role": "system", "content": prompts.KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.KNOWLEDGE_EXTRACTION_USER_PROMPT.format( - conversation=conversation - )} - ] - print("Calling LLM for knowledge extraction...") - result_text = client.chat_completion(model=model, messages=messages) - - private_data = "None" - assistant_knowledge = "None" - - try: - if "【User Private Data】" in result_text: - private_data_start = result_text.find("【User Private Data】") + len("【User Private Data】") - if "【Assistant Knowledge】" in result_text: - private_data_end = result_text.find("【Assistant Knowledge】") - private_data = result_text[private_data_start:private_data_end].strip() - - assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】") - assistant_knowledge = result_text[assistant_knowledge_start:].strip() - else: - private_data = result_text[private_data_start:].strip() - elif "【Assistant Knowledge】" in result_text: - assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】") - assistant_knowledge = result_text[assistant_knowledge_start:].strip() - - except Exception as e: - print(f"Error parsing knowledge extraction: {e}. Raw result: {result_text}") - - return { - "private": private_data if private_data else "None", - "assistant_knowledge": assistant_knowledge if assistant_knowledge else "None" - } - - -# Keep the old function for backward compatibility, but mark as deprecated -def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", known_user_traits="None"): - """ - DEPRECATED: Use gpt_user_profile_analysis and gpt_knowledge_extraction instead. - This function is kept for backward compatibility only. 
- """ - # Call the new functions - profile = gpt_user_profile_analysis(dialogs, client, model, known_user_traits) - knowledge_data = gpt_knowledge_extraction(dialogs, client, model) - - return { - "profile": profile, - "private": knowledge_data["private"], - "assistant_knowledge": knowledge_data["assistant_knowledge"] - } - - -def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="gpt-4o-mini"): - messages = [ - {"role": "system", "content": prompts.UPDATE_PROFILE_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.UPDATE_PROFILE_USER_PROMPT.format(old_profile=old_profile, new_analysis=new_analysis)} - ] - print("Calling LLM to update user profile...") - return client.chat_completion(model=model, messages=messages) - -def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"): - messages = [ - {"role": "system", "content": prompts.EXTRACT_THEME_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.EXTRACT_THEME_USER_PROMPT.format(answer_text=answer_text)} - ] - print("Calling LLM to extract theme...") - return client.chat_completion(model=model, messages=messages) - -def llm_extract_keywords(text, client: OpenAIClient, model="gpt-4o-mini"): - messages = [ - {"role": "system", "content": prompts.EXTRACT_KEYWORDS_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.EXTRACT_KEYWORDS_USER_PROMPT.format(text=text)} - ] - print("Calling LLM to extract keywords...") - response = client.chat_completion(model=model, messages=messages) - return [kw.strip() for kw in response.split(',') if kw.strip()] - -# ---- Functions from dynamic_update.py (to be used by Updater class) ---- -def check_conversation_continuity(previous_page, current_page, client: OpenAIClient, model="gpt-4o-mini"): - prev_user = previous_page.get("user_input", "") if previous_page else "" - prev_agent = previous_page.get("agent_response", "") if previous_page else "" - - user_prompt = prompts.CONTINUITY_CHECK_USER_PROMPT.format( - prev_user=prev_user, - prev_agent=prev_agent, - curr_user=current_page.get("user_input", ""), - curr_agent=current_page.get("agent_response", "") - ) - messages = [ - {"role": "system", "content": prompts.CONTINUITY_CHECK_SYSTEM_PROMPT}, - {"role": "user", "content": user_prompt} - ] - response = client.chat_completion(model=model, messages=messages, temperature=0.0, max_tokens=10) - return response.strip().lower() == "true" - -def generate_page_meta_info(last_page_meta, current_page, client: OpenAIClient, model="gpt-4o-mini"): - current_conversation = f"User: {current_page.get('user_input', '')}\nAssistant: {current_page.get('agent_response', '')}" - user_prompt = prompts.META_INFO_USER_PROMPT.format( - last_meta=last_page_meta if last_page_meta else "None", - new_dialogue=current_conversation - ) - messages = [ - {"role": "system", "content": prompts.META_INFO_SYSTEM_PROMPT}, - {"role": "user", "content": user_prompt} - ] - return client.chat_completion(model=model, messages=messages, temperature=0.3, max_tokens=100).strip() \ No newline at end of file diff --git a/memoryos-mcp/requirements.txt b/memoryos-mcp/requirements.txt deleted file mode 100644 index 40f8d3f..0000000 --- a/memoryos-mcp/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ - -mcp - -openai>=1.0.0 - -numpy==1.24 - -sentence-transformers>=2.2.0 - -faiss-gpu>=1.7.0 - -# 时间和日期处理 -python-dateutil>=2.8.0 - -typing-extensions>=4.0.0 - -# 可选:如果GPU不可用,可以手动安装CPU版本 -# pip uninstall faiss-gpu -# pip install faiss-cpu>=1.7.0 \ No newline at end of file diff --git a/memoryos-mcp/server_new.py 
b/memoryos-mcp/server_new.py deleted file mode 100644 index 5ebfd60..0000000 --- a/memoryos-mcp/server_new.py +++ /dev/null @@ -1,292 +0,0 @@ - -import sys -import os -import json -import argparse -from typing import Any, Dict, Optional, List -# 确保当前目录在sys.path中,以便导入memoryos模块 -sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'memoryos')) - -try: - from mcp.server.fastmcp import FastMCP -except ImportError as e: - print(f"ERROR: Failed to import FastMCP. Exception: {e}", file=sys.stderr) - print("请安装最新版本的MCP: pip install --upgrade mcp", file=sys.stderr) - sys.exit(1) - -try: - from memoryos import Memoryos - from utils import get_timestamp -except ImportError as e: - print(f"无法导入MemoryOS模块: {e}", file=sys.stderr) - print("请确保项目结构正确,memoryos目录应包含所有必要文件", file=sys.stderr) - sys.exit(1) - -# MemoryOS实例 - 将在初始化时设置 -memoryos_instance: Optional[Memoryos] = None - -def init_memoryos(config_path: str) -> Memoryos: - """初始化MemoryOS实例""" - if not os.path.exists(config_path): - raise FileNotFoundError(f"配置文件不存在: {config_path}") - - with open(config_path, 'r', encoding='utf-8') as f: - config = json.load(f) - - required_fields = ['user_id', 'openai_api_key', 'data_storage_path'] - for field in required_fields: - if field not in config: - raise ValueError(f"配置文件缺少必需字段: {field}") - - return Memoryos( - user_id=config['user_id'], - openai_api_key=config['openai_api_key'], - data_storage_path=config['data_storage_path'], - openai_base_url=config.get('openai_base_url'), - assistant_id=config.get('assistant_id', 'default_assistant_profile'), - short_term_capacity=config.get('short_term_capacity', 10), - mid_term_capacity=config.get('mid_term_capacity', 2000), - long_term_knowledge_capacity=config.get('long_term_knowledge_capacity', 100), - retrieval_queue_capacity=config.get('retrieval_queue_capacity', 7), - mid_term_heat_threshold=config.get('mid_term_heat_threshold', 5.0), - llm_model=config.get('llm_model', 'gpt-4o-mini') - ) - -# 创建FastMCP服务器实例 -mcp = FastMCP("MemoryOS") - -@mcp.tool() -def add_memory(user_input: str, agent_response: str, timestamp: Optional[str] = None, meta_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: - """ - 向MemoryOS系统添加新的记忆(用户输入和助手回应的对话对) - - Args: - user_input: 用户的输入或问题 - agent_response: 助手的回应 - timestamp: 时间戳(可选,格式:YYYY-MM-DD HH:MM:SS) - meta_data: 可选的元数据(JSON对象) - - Returns: - 包含操作结果的字典 - """ - global memoryos_instance - - if memoryos_instance is None: - return { - "status": "error", - "message": "MemoryOS is not initialized. Please check the configuration file." 
- } - - try: - if not user_input or not agent_response: - return { - "status": "error", - "message": "user_input and agent_response are required" - } - - memoryos_instance.add_memory( - user_input=user_input, - agent_response=agent_response, - timestamp=timestamp, - meta_data=meta_data - ) - - result = { - "status": "success", - "message": "Memory has been successfully added to MemoryOS", - "timestamp": timestamp or get_timestamp(), - "details": { - "user_input_length": len(user_input), - "agent_response_length": len(agent_response), - "has_meta_data": meta_data is not None - } - } - - return result - - except Exception as e: - return { - "status": "error", - "message": f"Error adding memory: {str(e)}" - } - -@mcp.tool() -def retrieve_memory(query: str, relationship_with_user: str = "friend", style_hint: str = "", max_results: int = 10) -> Dict[str, Any]: - """ - 根据查询从MemoryOS检索相关的记忆和上下文信息,包括短期记忆、中期记忆和长期知识 - - Args: - query: 检索查询,描述要寻找的信息 - relationship_with_user: 与用户的关系类型(如:friend, assistant, colleague等) - style_hint: 回应风格提示 - max_results: 返回的最大结果数量 - - Returns: - 包含检索结果的字典,包括: - - short_term_memory: 当前短期记忆中的所有QA对 - - retrieved_pages: 从中期记忆检索的相关页面 - - retrieved_user_knowledge: 从用户长期知识库检索的相关条目 - - retrieved_assistant_knowledge: 从助手知识库检索的相关条目 - """ - global memoryos_instance - - if memoryos_instance is None: - return { - "status": "error", - "message": "MemoryOS is not initialized. Please check the configuration file." - } - - try: - if not query: - return { - "status": "error", - "message": "query parameter is required" - } - - # 使用retriever获取相关上下文 - retrieval_results = memoryos_instance.retriever.retrieve_context( - user_query=query, - user_id=memoryos_instance.user_id - ) - - # 获取短期记忆内容 - short_term_history = memoryos_instance.short_term_memory.get_all() - - # 获取用户画像 - user_profile = memoryos_instance.get_user_profile_summary() - - # 组织返回结果 - result = { - "status": "success", - "query": query, - "timestamp": get_timestamp(), - "user_profile": user_profile if user_profile and user_profile.lower() != "none" else "No detailed user profile", - "short_term_memory": short_term_history, - "short_term_count": len(short_term_history), - # "retrieved_pages": retrieval_results["retrieved_pages"][:max_results], - # "retrieved_user_knowledge": retrieval_results["retrieved_user_knowledge"][:max_results], - # "retrieved_assistant_knowledge": retrieval_results["retrieved_assistant_knowledge"][:max_results], - "retrieved_pages": [{ - 'user_input': page['user_input'], - 'agent_response': page['agent_response'], - 'timestamp': page['timestamp'], - 'meta_info': page['meta_info'] - } for page in retrieval_results["retrieved_pages"][:max_results]], - - "retrieved_user_knowledge": [{ - 'knowledge': k['knowledge'], - 'timestamp': k['timestamp'] - } for k in retrieval_results["retrieved_user_knowledge"][:max_results]], - - "retrieved_assistant_knowledge": [{ - 'knowledge': k['knowledge'], - 'timestamp': k['timestamp'] - } for k in retrieval_results["retrieved_assistant_knowledge"][:max_results]], - # "total_pages_found": len(retrieval_results["retrieved_pages"]), - # "total_user_knowledge_found": len(retrieval_results["retrieved_user_knowledge"]), - # "total_assistant_knowledge_found": len(retrieval_results["retrieved_assistant_knowledge"]) - } - - return result - - except Exception as e: - return { - "status": "error", - "message": f"Error retrieving memory: {str(e)}" - } - -@mcp.tool() -def get_user_profile(include_knowledge: bool = True, include_assistant_knowledge: bool = False) -> Dict[str, Any]: - """ - 
获取用户的画像信息,包括个性特征、偏好和相关知识 - - Args: - include_knowledge: 是否包括用户相关的知识条目 - include_assistant_knowledge: 是否包括助手知识库 - - Returns: - 包含用户画像信息的字典 - """ - global memoryos_instance - - if memoryos_instance is None: - return { - "status": "error", - "message": "MemoryOS is not initialized. Please check the configuration file." - } - - try: - # 获取用户画像 - user_profile = memoryos_instance.get_user_profile_summary() - - result = { - "status": "success", - "timestamp": get_timestamp(), - "user_id": memoryos_instance.user_id, - "assistant_id": memoryos_instance.assistant_id, - "user_profile": user_profile if user_profile and user_profile.lower() != "none" else "No detailed user profile" - } - - if include_knowledge: - user_knowledge = memoryos_instance.user_long_term_memory.get_user_knowledge() - result["user_knowledge"] = [ - { - "knowledge": item["knowledge"], - "timestamp": item["timestamp"] - } - for item in user_knowledge - ] - result["user_knowledge_count"] = len(user_knowledge) - - if include_assistant_knowledge: - assistant_knowledge = memoryos_instance.get_assistant_knowledge_summary() - result["assistant_knowledge"] = [ - { - "knowledge": item["knowledge"], - "timestamp": item["timestamp"] - } - for item in assistant_knowledge - ] - result["assistant_knowledge_count"] = len(assistant_knowledge) - - return result - - except Exception as e: - return { - "status": "error", - "message": f"Error getting user profile: {str(e)}" - } - -def main(): - """主函数""" - parser = argparse.ArgumentParser(description="MemoryOS MCP Server") - parser.add_argument( - "--config", - type=str, - default="config.json", - help="配置文件路径 (默认: config.json)" - ) - - args = parser.parse_args() - - global memoryos_instance - - try: - # 初始化MemoryOS - memoryos_instance = init_memoryos(args.config) - print(f"MemoryOS MCP Server 已启动,用户ID: {memoryos_instance.user_id}", file=sys.stderr) - print(f"配置文件: {args.config}", file=sys.stderr) - - # 启动MCP服务器 - 使用stdio传输 - mcp.run(transport="stdio") - - except KeyboardInterrupt: - print("服务器被用户中断", file=sys.stderr) - except Exception as e: - print(f"启动服务器时发生错误: {e}", file=sys.stderr) - import traceback - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/memoryos-mcp/test_comprehensive.py b/memoryos-mcp/test_comprehensive.py deleted file mode 100644 index a56239f..0000000 --- a/memoryos-mcp/test_comprehensive.py +++ /dev/null @@ -1,381 +0,0 @@ - -""" -MemoryOS MCP 服务器综合测试客户端 -使用官方MCP Python SDK进行测试 -""" - -import asyncio -import json -import subprocess -import sys -import time -from typing import Dict, Any, Optional -from pathlib import Path - -# 尝试导入官方MCP客户端 -try: - from mcp import ClientSession, StdioServerParameters - from mcp.client.stdio import stdio_client - from mcp import types -except ImportError as e: - print(f"❌ 无法导入MCP客户端库: {e}") - print("请安装官方MCP SDK: pip install mcp") - sys.exit(1) - -class MemoryOSMCPTester: - """MemoryOS MCP服务器测试类""" - - def __init__(self, server_script: str = "server_new.py", config_file: str = "config.json"): - self.server_script = Path(server_script) - self.config_file = Path(config_file) - - # 验证文件存在 - if not self.server_script.exists(): - raise FileNotFoundError(f"服务器脚本不存在: {self.server_script}") - if not self.config_file.exists(): - raise FileNotFoundError(f"配置文件不存在: {self.config_file}") - - async def test_server_initialization(self): - """测试服务器初始化""" - print("\n🔄 测试1: 服务器初始化") - - server_params = StdioServerParameters( - command=sys.executable, - args=[str(self.server_script), "--config", 
str(self.config_file)], - env=None - ) - - try: - async with stdio_client(server_params) as (read_stream, write_stream): - async with ClientSession(read_stream, write_stream) as session: - # 初始化连接 - await session.initialize() - print("✅ 服务器初始化成功") - return True - except Exception as e: - print(f"❌ 服务器初始化失败: {e}") - return False - - async def test_tools_discovery(self): - """测试工具发现""" - print("\n🔧 测试2: 工具发现") - - server_params = StdioServerParameters( - command=sys.executable, - args=[str(self.server_script), "--config", str(self.config_file)], - env=None - ) - - try: - async with stdio_client(server_params) as (read_stream, write_stream): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - - # 获取工具列表 - tools_result = await session.list_tools() - tools = tools_result.tools if hasattr(tools_result, 'tools') else [] - - print(f"✅ 发现 {len(tools)} 个工具:") - expected_tools = ["add_memory", "retrieve_memory", "get_user_profile"] - - for tool in tools: - print(f" - {tool.name}: {tool.description}") - if tool.name in expected_tools: - expected_tools.remove(tool.name) - - if expected_tools: - print(f"⚠️ 缺少预期工具: {expected_tools}") - else: - print("✅ 所有预期工具都已找到") - - return tools - except Exception as e: - print(f"❌ 工具发现失败: {e}") - return [] - - async def test_add_memory_tool(self): - """测试添加记忆工具 - 20轮测试""" - print("\n💾 测试3: 添加记忆工具 (20轮测试)") - - server_params = StdioServerParameters( - command=sys.executable, - args=[str(self.server_script), "--config", str(self.config_file)], - env=None - ) - - # 准备20轮测试数据 - test_conversations = [ - ("Hello, I'm a new user", "Welcome to MemoryOS! I'm your AI assistant."), - ("I like programming", "Great! Programming is a very interesting skill. What programming language do you mainly use?"), - ("I often use Python", "Python is a great language! Simple yet powerful."), - ("I'm learning machine learning", "Machine learning has great prospects! Which field are you focusing on?"), - ("I'm interested in natural language processing", "NLP is a fascinating field! It has many practical applications."), - ("I want to understand how ChatGPT works", "ChatGPT is based on the Transformer architecture and uses massive pre-training data."), - ("What is the attention mechanism?", "The attention mechanism allows models to focus on the most relevant parts of the input sequence."), - ("I want to learn deep learning", "For deep learning beginners, I suggest starting with neural network fundamentals."), - ("Recommend some learning resources", "I recommend classic resources like 'Deep Learning' book and CS231n course."), - ("I have a project idea", "Awesome! Share your project idea and I'll help you analyze it."), - ("I want to build an intelligent dialogue system", "Intelligent dialogue systems need to consider intent recognition, context understanding and other technologies."), - ("How to handle multi-turn conversations?", "Multi-turn conversations require maintaining dialogue state and context memory."), - ("How does MemoryOS work?", "MemoryOS maintains long-term dialogue context through hierarchical memory management."), - ("What's the difference between short-term and long-term memory", "Short-term memory stores current conversations, while long-term memory saves important user information."), - ("How to optimize memory retrieval?", "You can use vector similarity search and semantic understanding to improve retrieval accuracy."), - ("I want to contribute code", "Welcome to contribute! 
You can start by reading documentation and solving issues."), - ("What open source projects do you recommend?", "I recommend following popular AI open source projects like Hugging Face and LangChain."), - ("My interest is computer vision", "Computer vision covers areas like image recognition and object detection."), - ("Advice on choosing deep learning frameworks", "Both PyTorch and TensorFlow are great. PyTorch is better for research, TensorFlow for production."), - ("Thank you for your help!", "You're welcome! I'm glad I could help you, looking forward to our next conversation.") - ] - - try: - async with stdio_client(server_params) as (read_stream, write_stream): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - - success_count = 0 - - # 执行20轮添加记忆测试 - for i, (user_input, agent_response) in enumerate(test_conversations, 1): - print(f" 第{i:2d}轮: 添加记忆...") - - test_data = { - "user_input": user_input, - "agent_response": agent_response - # 不包含 meta_data - } - - result = await session.call_tool("add_memory", test_data) - - if hasattr(result, 'content') and result.content: - content = result.content[0] - if hasattr(content, 'text'): - response = json.loads(content.text) - if response.get("status") == "success": - success_count += 1 - print(f" 第{i:2d}轮: ✅ 成功") - else: - print(f" 第{i:2d}轮: ❌ 失败 - {response.get('message', '未知错误')}") - else: - print(f" 第{i:2d}轮: ❌ 失败 - 无效响应格式") - else: - print(f" 第{i:2d}轮: ❌ 失败 - 无响应内容") - - # 短暂延迟,避免过快请求 - await asyncio.sleep(0.1) - - print(f"\n✅ 记忆添加测试完成: {success_count}/{len(test_conversations)} 成功") - return success_count == len(test_conversations) - - except Exception as e: - print(f"❌ 记忆添加测试失败: {e}") - return False - - async def test_retrieve_memory_tool(self): - """测试检索记忆工具""" - print("\n🔍 测试4: 检索记忆工具") - - server_params = StdioServerParameters( - command=sys.executable, - args=[str(self.server_script), "--config", str(self.config_file)], - env=None - ) - - # 准备多个检索查询 - test_queries = [ - ("user's programming skills", "Find user's programming related information"), - ("machine learning related content", "Retrieve machine learning and AI related conversations"), - ("learning resource recommendations", "Find recommended learning resources"), - ("project related discussions", "Retrieve conversations about projects"), - ("user's interests and hobbies", "Understand user's interests and preferences") - ] - - try: - async with stdio_client(server_params) as (read_stream, write_stream): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - - success_count = 0 - - # 执行多个检索查询测试 - for i, (query, description) in enumerate(test_queries, 1): - print(f" 第{i}个查询: {description}") - - test_query = { - "query": query, - "relationship_with_user": "friend", - "style_hint": "helpful and informative", - "max_results": 10 - } - - result = await session.call_tool("retrieve_memory", test_query) - - if hasattr(result, 'content') and result.content: - content = result.content[0] - if hasattr(content, 'text'): - response = json.loads(content.text) - if response.get("status") == "success": - success_count += 1 - print(f" 第{i}个查询: ✅ 成功") - print(f" - 检索到页面数: {response.get('total_pages_found', 0)}") - print(f" - 用户知识数: {response.get('total_user_knowledge_found', 0)}") - print(f" - 助手知识数: {response.get('total_assistant_knowledge_found', 0)}") - else: - print(f" 第{i}个查询: ❌ 失败 - {response.get('message', '未知错误')}") - else: - print(f" 第{i}个查询: ❌ 失败 - 无效响应格式") - else: - print(f" 第{i}个查询: ❌ 失败 - 无响应内容") - 
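Stripped of the per-test scaffolding, every check in this file reduces to the same client pattern built from the SDK entry points imported at the top of the file. A minimal standalone sketch, with the script and config paths as illustrative assumptions:

    import asyncio
    import sys
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client

    async def call_once() -> None:
        params = StdioServerParameters(
            command=sys.executable,
            args=["server_new.py", "--config", "config.json"],  # illustrative paths
            env=None,
        )
        async with stdio_client(params) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()
                result = await session.call_tool("retrieve_memory", {"query": "user's hobbies"})
                print(result.content[0].text)  # JSON string returned by the tool

    asyncio.run(call_once())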
- # 短暂延迟 - await asyncio.sleep(0.1) - - print(f"\n✅ 记忆检索测试完成: {success_count}/{len(test_queries)} 成功") - return success_count >= len(test_queries) // 2 # 至少一半成功即可 - - except Exception as e: - print(f"❌ 记忆检索测试失败: {e}") - return False - - async def test_get_user_profile_tool(self): - """测试获取用户画像工具""" - print("\n👤 测试5: 获取用户画像工具") - - server_params = StdioServerParameters( - command=sys.executable, - args=[str(self.server_script), "--config", str(self.config_file)], - env=None - ) - - # 准备不同的参数组合测试 - test_configs = [ - ({"include_knowledge": True, "include_assistant_knowledge": False}, "包含用户知识"), - ({"include_knowledge": False, "include_assistant_knowledge": True}, "包含助手知识"), - ({"include_knowledge": True, "include_assistant_knowledge": True}, "包含所有知识"), - ({"include_knowledge": False, "include_assistant_knowledge": False}, "仅基本画像") - ] - - try: - async with stdio_client(server_params) as (read_stream, write_stream): - async with ClientSession(read_stream, write_stream) as session: - await session.initialize() - - success_count = 0 - - # 执行不同配置的用户画像测试 - for i, (test_params, description) in enumerate(test_configs, 1): - print(f" 第{i}种配置: {description}") - - result = await session.call_tool("get_user_profile", test_params) - - if hasattr(result, 'content') and result.content: - content = result.content[0] - if hasattr(content, 'text'): - response = json.loads(content.text) - if response.get("status") == "success": - success_count += 1 - print(f" 第{i}种配置: ✅ 成功") - print(f" - 用户ID: {response.get('user_id', 'N/A')}") - print(f" - 助手ID: {response.get('assistant_id', 'N/A')}") - - # 显示用户画像信息 - user_profile = response.get('user_profile', '暂无') - if len(user_profile) > 100: - user_profile = user_profile[:100] + "..." - print(f" - 用户画像: {user_profile}") - - # 显示知识条目数量 - if 'user_knowledge_count' in response: - print(f" - 用户知识条目数: {response.get('user_knowledge_count', 0)}") - if 'assistant_knowledge_count' in response: - print(f" - 助手知识条目数: {response.get('assistant_knowledge_count', 0)}") - else: - print(f" 第{i}种配置: ❌ 失败 - {response.get('message', '未知错误')}") - else: - print(f" 第{i}种配置: ❌ 失败 - 无效响应格式") - else: - print(f" 第{i}种配置: ❌ 失败 - 无响应内容") - - # 短暂延迟 - await asyncio.sleep(0.1) - - print(f"\n✅ 用户画像测试完成: {success_count}/{len(test_configs)} 成功") - return success_count >= 3 # 至少3种配置成功 - - except Exception as e: - print(f"❌ 用户画像测试失败: {e}") - return False - - async def run_all_tests(self): - """运行所有测试""" - print("🚀 开始MemoryOS MCP服务器综合测试") - print(f"服务器脚本: {self.server_script}") - print(f"配置文件: {self.config_file}") - print("=" * 60) - - test_results = [] - - # 运行所有测试 - tests = [ - ("服务器初始化", self.test_server_initialization), - ("工具发现", self.test_tools_discovery), - ("添加记忆 (20轮)", self.test_add_memory_tool), - ("检索记忆", self.test_retrieve_memory_tool), - ("获取用户画像", self.test_get_user_profile_tool), - ] - - for test_name, test_func in tests: - try: - result = await test_func() - test_results.append({"name": test_name, "result": result, "error": None}) - except Exception as e: - test_results.append({"name": test_name, "result": False, "error": str(e)}) - - # 输出测试结果汇总 - print("\n" + "=" * 60) - print("📊 测试结果汇总:") - - passed_count = 0 - total_count = len(test_results) - - for test in test_results: - status = "✅ 通过" if test["result"] else "❌ 失败" - print(f" {status} - {test['name']}") - if test["error"]: - print(f" 错误: {test['error']}") - if test["result"]: - passed_count += 1 - - print(f"\n总计: {passed_count}/{total_count} 测试通过") - - if passed_count == total_count: - print("🎉 所有测试通过!MemoryOS MCP服务器工作正常") - else: - 
print("⚠️ 部分测试失败,请检查服务器配置和实现") - - return passed_count == total_count - -def main(): - """主函数""" - import argparse - - parser = argparse.ArgumentParser(description="MemoryOS MCP服务器综合测试") - parser.add_argument("--server", default="server_new.py", help="服务器脚本路径") - parser.add_argument("--config", default="config.json", help="配置文件路径") - - args = parser.parse_args() - - try: - tester = MemoryOSMCPTester(args.server, args.config) - success = asyncio.run(tester.run_all_tests()) - sys.exit(0 if success else 1) - except KeyboardInterrupt: - print("\n⚠️ 测试被用户中断") - sys.exit(1) - except Exception as e: - print(f"\n❌ 测试过程中发生严重错误: {e}") - import traceback - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - main() \ No newline at end of file From 40787ebf2baa59150145b2792e95c266c0128814 Mon Sep 17 00:00:00 2001 From: Kang Jiazheng <108711748+kkkjz@users.noreply.github.com> Date: Sun, 13 Jul 2025 11:36:02 +0800 Subject: [PATCH 2/4] Delete memoryos-pypi directory --- memoryos-pypi/__init__.py | 3 - memoryos-pypi/long_term.py | 159 -------------- memoryos-pypi/memoryos.py | 332 ----------------------------- memoryos-pypi/mid_term.py | 370 --------------------------------- memoryos-pypi/prompts.py | 235 --------------------- memoryos-pypi/requirements.txt | 21 -- memoryos-pypi/retriever.py | 131 ------------ memoryos-pypi/short_term.py | 64 ------ memoryos-pypi/test.py | 55 ----- memoryos-pypi/updater.py | 255 ----------------------- memoryos-pypi/utils.py | 351 ------------------------------- 11 files changed, 1976 deletions(-) delete mode 100644 memoryos-pypi/__init__.py delete mode 100644 memoryos-pypi/long_term.py delete mode 100644 memoryos-pypi/memoryos.py delete mode 100644 memoryos-pypi/mid_term.py delete mode 100644 memoryos-pypi/prompts.py delete mode 100644 memoryos-pypi/requirements.txt delete mode 100644 memoryos-pypi/retriever.py delete mode 100644 memoryos-pypi/short_term.py delete mode 100644 memoryos-pypi/test.py delete mode 100644 memoryos-pypi/updater.py delete mode 100644 memoryos-pypi/utils.py diff --git a/memoryos-pypi/__init__.py b/memoryos-pypi/__init__.py deleted file mode 100644 index b97e620..0000000 --- a/memoryos-pypi/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .memoryos import Memoryos - -__all__ = ['Memoryos'] \ No newline at end of file diff --git a/memoryos-pypi/long_term.py b/memoryos-pypi/long_term.py deleted file mode 100644 index 1001f07..0000000 --- a/memoryos-pypi/long_term.py +++ /dev/null @@ -1,159 +0,0 @@ -import json -import numpy as np -import faiss -from collections import deque -try: - from .utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists -except ImportError: - from utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists - -class LongTermMemory: - def __init__(self, file_path, knowledge_capacity=100): - self.file_path = file_path - ensure_directory_exists(self.file_path) - self.knowledge_capacity = knowledge_capacity - self.user_profiles = {} # {user_id: {data: "profile_string", "last_updated": "timestamp"}} - # Use deques for knowledge bases to easily manage capacity - self.knowledge_base = deque(maxlen=self.knowledge_capacity) # For general/user private knowledge - self.assistant_knowledge = deque(maxlen=self.knowledge_capacity) # For assistant specific knowledge - self.load() - - def update_user_profile(self, user_id, new_data, merge=True): - if merge and user_id in self.user_profiles and self.user_profiles[user_id].get("data"): # Check if data exists - current_data = 
self.user_profiles[user_id]["data"] - if isinstance(current_data, str) and isinstance(new_data, str): - updated_data = f"{current_data}\n\n--- Updated on {get_timestamp()} ---\n{new_data}" - else: # Fallback to overwrite if types are not strings or for more complex merge - updated_data = new_data - else: - # If merge=False or no existing data, replace with new data - updated_data = new_data - - self.user_profiles[user_id] = { - "data": updated_data, - "last_updated": get_timestamp() - } - print(f"LongTermMemory: Updated user profile for {user_id} (merge={merge}).") - self.save() - - def get_raw_user_profile(self, user_id): - return self.user_profiles.get(user_id, {}).get("data", "None") # Return "None" string if not found - - def get_user_profile_data(self, user_id): - return self.user_profiles.get(user_id, {}) - - def add_knowledge_entry(self, knowledge_text, knowledge_deque: deque, type_name="knowledge"): - if not knowledge_text or knowledge_text.strip().lower() in ["", "none", "- none", "- none."]: - print(f"LongTermMemory: Empty {type_name} received, not saving.") - return - - # If deque is full, the oldest item is automatically removed when appending. - vec = get_embedding(knowledge_text) - vec = normalize_vector(vec).tolist() - entry = { - "knowledge": knowledge_text, - "timestamp": get_timestamp(), - "knowledge_embedding": vec - } - knowledge_deque.append(entry) - print(f"LongTermMemory: Added {type_name}. Current count: {len(knowledge_deque)}.") - self.save() - - def add_user_knowledge(self, knowledge_text): - self.add_knowledge_entry(knowledge_text, self.knowledge_base, "user knowledge") - - def add_assistant_knowledge(self, knowledge_text): - self.add_knowledge_entry(knowledge_text, self.assistant_knowledge, "assistant knowledge") - - def get_user_knowledge(self): - return list(self.knowledge_base) - - def get_assistant_knowledge(self): - return list(self.assistant_knowledge) - - def _search_knowledge_deque(self, query, knowledge_deque: deque, threshold=0.1, top_k=5): - if not knowledge_deque: - return [] - - query_vec = get_embedding(query) - query_vec = normalize_vector(query_vec) - - embeddings = [] - valid_entries = [] - for entry in knowledge_deque: - if "knowledge_embedding" in entry and entry["knowledge_embedding"]: - embeddings.append(np.array(entry["knowledge_embedding"], dtype=np.float32)) - valid_entries.append(entry) - else: - print(f"Warning: Entry without embedding found in knowledge_deque: {entry.get('knowledge','N/A')[:50]}") - - if not embeddings: - return [] - - embeddings_np = np.array(embeddings, dtype=np.float32) - if embeddings_np.ndim == 1: # Single item case - if embeddings_np.shape[0] == 0: return [] # Empty embeddings - embeddings_np = embeddings_np.reshape(1, -1) - - if embeddings_np.shape[0] == 0: # No valid embeddings - return [] - - dim = embeddings_np.shape[1] - index = faiss.IndexFlatIP(dim) # Using Inner Product for similarity - index.add(embeddings_np) - - query_arr = np.array([query_vec], dtype=np.float32) - distances, indices = index.search(query_arr, min(top_k, len(valid_entries))) # Search at most k or length of valid_entries - - results = [] - for i, idx in enumerate(indices[0]): - if idx != -1: # faiss returns -1 for no valid index - similarity_score = float(distances[0][i]) # For IndexFlatIP, distance is the dot product (similarity) - if similarity_score >= threshold: - results.append(valid_entries[idx]) # Add the original entry dict - - # Sort by similarity score descending before returning, as faiss might not guarantee order for IP - 
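Because every stored vector is normalized before indexing, the inner product that IndexFlatIP computes here is exactly cosine similarity, so the returned scores can be compared against the similarity threshold directly. A self-contained sketch of this index-and-search step, with random vectors standing in for real embeddings:

    import numpy as np
    import faiss

    rng = np.random.default_rng(0)
    vecs = rng.normal(size=(8, 384)).astype(np.float32)
    vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)  # unit-length rows

    index = faiss.IndexFlatIP(vecs.shape[1])  # inner product == cosine on unit vectors
    index.add(vecs)

    scores, ids = index.search(vecs[3:4], 3)  # query with a known row
    print(ids[0], scores[0])                  # ids[0][0] == 3, scores[0][0] close to 1.0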
results.sort(key=lambda x: float(np.dot(np.array(x["knowledge_embedding"], dtype=np.float32), query_vec)), reverse=True) - return results - - def search_user_knowledge(self, query, threshold=0.1, top_k=5): - results = self._search_knowledge_deque(query, self.knowledge_base, threshold, top_k) - print(f"LongTermMemory: Searched user knowledge for '{query[:30]}...'. Found {len(results)} matches.") - return results - - def search_assistant_knowledge(self, query, threshold=0.1, top_k=5): - results = self._search_knowledge_deque(query, self.assistant_knowledge, threshold, top_k) - print(f"LongTermMemory: Searched assistant knowledge for '{query[:30]}...'. Found {len(results)} matches.") - return results - - def save(self): - data = { - "user_profiles": self.user_profiles, - "knowledge_base": list(self.knowledge_base), # Convert deques to lists for JSON serialization - "assistant_knowledge": list(self.assistant_knowledge) - } - try: - with open(self.file_path, "w", encoding="utf-8") as f: - json.dump(data, f, ensure_ascii=False, indent=2) - except IOError as e: - print(f"Error saving LongTermMemory to {self.file_path}: {e}") - - def load(self): - try: - with open(self.file_path, "r", encoding="utf-8") as f: - data = json.load(f) - self.user_profiles = data.get("user_profiles", {}) - # Load into deques, respecting maxlen - kb_data = data.get("knowledge_base", []) - self.knowledge_base = deque(kb_data, maxlen=self.knowledge_capacity) - - ak_data = data.get("assistant_knowledge", []) - self.assistant_knowledge = deque(ak_data, maxlen=self.knowledge_capacity) - - print(f"LongTermMemory: Loaded from {self.file_path}.") - except FileNotFoundError: - print(f"LongTermMemory: No history file found at {self.file_path}. Initializing new memory.") - except json.JSONDecodeError: - print(f"LongTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") - except Exception as e: - print(f"LongTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/memoryos.py b/memoryos-pypi/memoryos.py deleted file mode 100644 index 58f266c..0000000 --- a/memoryos-pypi/memoryos.py +++ /dev/null @@ -1,332 +0,0 @@ -import os -import json -from concurrent.futures import ThreadPoolExecutor, as_completed - -# 修改为绝对导入 -try: - # 尝试相对导入(当作为包使用时) - from .utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists - from . 
import prompts
-    from .short_term import ShortTermMemory
-    from .mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic
-    from .long_term import LongTermMemory
-    from .updater import Updater
-    from .retriever import Retriever
-except ImportError:
-    # Fall back to absolute imports (when used as a standalone module)
-    from utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists
-    import prompts
-    from short_term import ShortTermMemory
-    from mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic
-    from long_term import LongTermMemory
-    from updater import Updater
-    from retriever import Retriever
-
-# Heat threshold for triggering profile/knowledge update from mid-term memory
-H_PROFILE_UPDATE_THRESHOLD = 5.0
-DEFAULT_ASSISTANT_ID = "default_assistant_profile"
-
-class Memoryos:
-    def __init__(self, user_id: str,
-                 openai_api_key: str,
-                 data_storage_path: str,
-                 openai_base_url: str = None,
-                 assistant_id: str = DEFAULT_ASSISTANT_ID,
-                 short_term_capacity=10,
-                 mid_term_capacity=2000,
-                 long_term_knowledge_capacity=100,
-                 retrieval_queue_capacity=7,
-                 mid_term_heat_threshold=H_PROFILE_UPDATE_THRESHOLD,
-                 mid_term_similarity_threshold=0.6, # New: similarity threshold for mid-term memory insertion
-                 llm_model="gpt-4o-mini" # Unified model for all LLM operations
-                 ):
-        self.user_id = user_id
-        self.assistant_id = assistant_id
-        self.data_storage_path = os.path.abspath(data_storage_path)
-        self.llm_model = llm_model
-        os.environ["llm_model"] = llm_model
-        self.mid_term_similarity_threshold = mid_term_similarity_threshold
-
-        print(f"Initializing Memoryos for user '{self.user_id}' and assistant '{self.assistant_id}'. Data path: {self.data_storage_path}")
-        print(f"Using unified LLM model: {self.llm_model}")
-
-        # Initialize OpenAI Client
-        self.client = OpenAIClient(api_key=openai_api_key, base_url=openai_base_url)
-
-        # Define file paths for user-specific data
-        self.user_data_dir = os.path.join(self.data_storage_path, "users", self.user_id)
-        user_short_term_path = os.path.join(self.user_data_dir, "short_term.json")
-        user_mid_term_path = os.path.join(self.user_data_dir, "mid_term.json")
-        user_long_term_path = os.path.join(self.user_data_dir, "long_term_user.json") # User profile and their knowledge
-
-        # Define file paths for assistant-specific data (knowledge)
-        self.assistant_data_dir = os.path.join(self.data_storage_path, "assistants", self.assistant_id)
-        assistant_long_term_path = os.path.join(self.assistant_data_dir, "long_term_assistant.json")
-
-        # Ensure directories exist (ensure_directory_exists creates the parent dirs of a file path)
-        ensure_directory_exists(user_short_term_path)
-        ensure_directory_exists(user_mid_term_path)
-        ensure_directory_exists(user_long_term_path)
-        ensure_directory_exists(assistant_long_term_path)
-
-        # Initialize Memory Modules for User
-        self.short_term_memory = ShortTermMemory(file_path=user_short_term_path, max_capacity=short_term_capacity)
-        self.mid_term_memory = MidTermMemory(file_path=user_mid_term_path, client=self.client, max_capacity=mid_term_capacity)
-        self.user_long_term_memory = LongTermMemory(file_path=user_long_term_path, knowledge_capacity=long_term_knowledge_capacity)
-
-        # Initialize Memory Module for Assistant Knowledge
-        self.assistant_long_term_memory = LongTermMemory(file_path=assistant_long_term_path, knowledge_capacity=long_term_knowledge_capacity)
-
-        # Initialize Orchestration Modules
-        self.updater = Updater(short_term_memory=self.short_term_memory,
-                               mid_term_memory=self.mid_term_memory,
-                               long_term_memory=self.user_long_term_memory, # Updater primarily updates user's LTM profile/knowledge
-                               client=self.client,
-                               topic_similarity_threshold=mid_term_similarity_threshold, # Pass the mid-term similarity threshold through
-                               llm_model=self.llm_model)
-        self.retriever = Retriever(
-            mid_term_memory=self.mid_term_memory,
-            long_term_memory=self.user_long_term_memory,
-            assistant_long_term_memory=self.assistant_long_term_memory, # Pass assistant LTM
-            queue_capacity=retrieval_queue_capacity
-        )
-
-        self.mid_term_heat_threshold = mid_term_heat_threshold
-
-    def _trigger_profile_and_knowledge_update_if_needed(self):
-        """
-        Checks mid-term memory for hot segments and triggers profile/knowledge update if threshold is met.
-        Adapted from main_memoybank.py's update_user_profile_from_top_segment.
-        Enhanced with parallel LLM processing for better performance.
-        """
-        if not self.mid_term_memory.heap:
-            return
-
-        # Peek at the top of the heap (hottest segment)
-        # MidTermMemory heap stores (-H_segment, sid)
-        neg_heat, sid = self.mid_term_memory.heap[0]
-        current_heat = -neg_heat
-
-        if current_heat >= self.mid_term_heat_threshold:
-            session = self.mid_term_memory.sessions.get(sid)
-            if not session:
-                self.mid_term_memory.rebuild_heap() # Clean up if session is gone
-                return
-
-            # Get unanalyzed pages from this hot session
-            # A page is a dict: {"user_input": ..., "agent_response": ..., "timestamp": ..., "analyzed": False, ...}
-            unanalyzed_pages = [p for p in session.get("details", []) if not p.get("analyzed", False)]
-
-            if unanalyzed_pages:
-                print(f"Memoryos: Mid-term session {sid} heat ({current_heat:.2f}) exceeded threshold. Analyzing {len(unanalyzed_pages)} pages for profile/knowledge update.")
-
-                # Run two LLM tasks in parallel: user profile analysis (which already folds in the update) and knowledge extraction
-                def task_user_profile_analysis():
-                    print("Memoryos: Starting parallel user profile analysis and update...")
-                    # Fetch the existing user profile
-                    existing_profile = self.user_long_term_memory.get_raw_user_profile(self.user_id)
-                    if not existing_profile or existing_profile.lower() == "none":
-                        existing_profile = "No existing profile data."
-
-                    # Returns the fully updated profile directly
-                    return gpt_user_profile_analysis(unanalyzed_pages, self.client, model=self.llm_model, existing_user_profile=existing_profile)
-
-                def task_knowledge_extraction():
-                    print("Memoryos: Starting parallel knowledge extraction...")
-                    return gpt_knowledge_extraction(unanalyzed_pages, self.client, model=self.llm_model)
-
-                # Execute both tasks in parallel
-                with ThreadPoolExecutor(max_workers=2) as executor:
-                    # Submit the two main tasks
-                    future_profile = executor.submit(task_user_profile_analysis)
-                    future_knowledge = executor.submit(task_knowledge_extraction)
-
-                    # Wait for the results
-                    try:
-                        updated_user_profile = future_profile.result() # Already the full, updated profile
-                        knowledge_result = future_knowledge.result()
-                    except Exception as e:
-                        print(f"Error in parallel LLM processing: {e}")
-                        return
-
-                new_user_private_knowledge = knowledge_result.get("private")
-                new_assistant_knowledge = knowledge_result.get("assistant_knowledge")
-
-                # Use the updated full user profile as-is
-                if updated_user_profile and updated_user_profile.lower() != "none":
-                    print("Memoryos: Updating user profile with integrated analysis...")
-                    self.user_long_term_memory.update_user_profile(self.user_id, updated_user_profile, merge=False) # Replace outright with the new full profile
-
-                # Add User Private Knowledge to user's LTM
-                if new_user_private_knowledge and new_user_private_knowledge.lower() != "none":
-                    for line in new_user_private_knowledge.split('\n'):
-                        if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
-                            self.user_long_term_memory.add_user_knowledge(line.strip())
-
-                # Add Assistant Knowledge to assistant's LTM
-                if new_assistant_knowledge and new_assistant_knowledge.lower() != "none":
-                    for line in new_assistant_knowledge.split('\n'):
-                        if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
-                            self.assistant_long_term_memory.add_assistant_knowledge(line.strip()) # Save to dedicated assistant LTM
-
-                # Mark pages as analyzed and reset session heat contributors
-                for p in session["details"]:
-                    p["analyzed"] = True # Mark all pages in session, or just unanalyzed_pages?
-                    # Original code marked all pages in session
-
-                session["N_visit"] = 0 # Reset visits after analysis
-                session["L_interaction"] = 0 # Reset interaction length contribution
-                # session["R_recency"] = 1.0 # Recency will re-calculate naturally
-                session["H_segment"] = compute_segment_heat(session) # Recompute heat with reset factors
-                session["last_visit_time"] = get_timestamp() # Update last visit time
-
-                self.mid_term_memory.rebuild_heap() # Heap needs rebuild due to H_segment change
-                self.mid_term_memory.save()
-                print(f"Memoryos: Profile/Knowledge update for session {sid} complete. Heat reset.")
-            else:
-                print(f"Memoryos: Hot session {sid} has no unanalyzed pages. Skipping profile update.")
-        else:
-            # print(f"Memoryos: Top session {sid} heat ({current_heat:.2f}) below threshold. No profile update.")
-            pass # No action if below threshold
-
-    def add_memory(self, user_input: str, agent_response: str, timestamp: str = None, meta_data: dict = None):
-        """
-        Adds a new QA pair (memory) to the system.
-        meta_data is not used in the current refactoring but kept for future use.
-        """
-        if not timestamp:
-            timestamp = get_timestamp()
-
-        qa_pair = {
-            "user_input": user_input,
-            "agent_response": agent_response,
-            "timestamp": timestamp
-            # meta_data can be added here if it needs to be stored with the QA pair
-        }
-        self.short_term_memory.add_qa_pair(qa_pair)
-        print(f"Memoryos: Added QA to short-term. 
User: {user_input[:30]}...") - - if self.short_term_memory.is_full(): - print("Memoryos: Short-term memory full. Processing to mid-term.") - self.updater.process_short_term_to_mid_term() - - # After any memory addition that might impact mid-term, check for profile updates - self._trigger_profile_and_knowledge_update_if_needed() - - def get_response(self, query: str, relationship_with_user="friend", style_hint="", user_conversation_meta_data: dict = None) -> str: - """ - Generates a response to the user's query, incorporating memory and context. - """ - print(f"Memoryos: Generating response for query: '{query[:50]}...'") - - # 1. Retrieve context - retrieval_results = self.retriever.retrieve_context( - user_query=query, - user_id=self.user_id - # Using default thresholds from Retriever class for now - ) - retrieved_pages = retrieval_results["retrieved_pages"] - retrieved_user_knowledge = retrieval_results["retrieved_user_knowledge"] - retrieved_assistant_knowledge = retrieval_results["retrieved_assistant_knowledge"] - - # 2. Get short-term history - short_term_history = self.short_term_memory.get_all() - history_text = "\n".join([ - f"User: {qa.get('user_input', '')}\nAssistant: {qa.get('agent_response', '')} (Time: {qa.get('timestamp', '')})" - for qa in short_term_history - ]) - - # 3. Format retrieved mid-term pages (retrieval_queue equivalent) - retrieval_text = "\n".join([ - f"【Historical Memory】\nUser: {page.get('user_input', '')}\nAssistant: {page.get('agent_response', '')}\nTime: {page.get('timestamp', '')}\nConversation chain overview: {page.get('meta_info','N/A')}" - for page in retrieved_pages - ]) - - # 4. Get user profile - user_profile_text = self.user_long_term_memory.get_raw_user_profile(self.user_id) - if not user_profile_text or user_profile_text.lower() == "none": - user_profile_text = "No detailed profile available yet." - - # 5. Format retrieved user knowledge for background - user_knowledge_background = "" - if retrieved_user_knowledge: - user_knowledge_background = "\n【Relevant User Knowledge Entries】\n" - for kn_entry in retrieved_user_knowledge: - user_knowledge_background += f"- {kn_entry['knowledge']} (Recorded: {kn_entry['timestamp']})\n" - - background_context = f"【User Profile】\n{user_profile_text}\n{user_knowledge_background}" - - # 6. Format retrieved Assistant Knowledge (from assistant's LTM) - # Use retrieved assistant knowledge instead of all assistant knowledge - assistant_knowledge_text_for_prompt = "【Assistant Knowledge Base】\n" - if retrieved_assistant_knowledge: - for ak_entry in retrieved_assistant_knowledge: - assistant_knowledge_text_for_prompt += f"- {ak_entry['knowledge']} (Recorded: {ak_entry['timestamp']})\n" - else: - assistant_knowledge_text_for_prompt += "- No relevant assistant knowledge found for this query.\n" - - # 7. Format user_conversation_meta_data (if provided) - meta_data_text_for_prompt = "【Current Conversation Metadata】\n" - if user_conversation_meta_data: - try: - meta_data_text_for_prompt += json.dumps(user_conversation_meta_data, ensure_ascii=False, indent=2) - except TypeError: - meta_data_text_for_prompt += str(user_conversation_meta_data) - else: - meta_data_text_for_prompt += "None provided for this turn." - - # 8. 
Construct Prompts
-        system_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT.format(
-            relationship=relationship_with_user,
-            assistant_knowledge_text=assistant_knowledge_text_for_prompt,
-            meta_data_text=meta_data_text_for_prompt # Using the meta_data_text placeholder for user_conversation_meta_data
-        )
-
-        user_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_USER_PROMPT.format(
-            history_text=history_text,
-            retrieval_text=retrieval_text,
-            background=background_context,
-            relationship=relationship_with_user,
-            query=query
-        )
-
-        messages = [
-            {"role": "system", "content": system_prompt_text},
-            {"role": "user", "content": user_prompt_text}
-        ]
-
-        # 9. Call LLM for response
-        print("Memoryos: Calling LLM for final response generation...")
-        # print("System Prompt:\n", system_prompt_text)
-        # print("User Prompt:\n", user_prompt_text)
-        response_content = self.client.chat_completion(
-            model=self.llm_model,
-            messages=messages,
-            temperature=0.7,
-            max_tokens=1500 # As in the original main
-        )
-
-        # 10. Add this interaction to memory
-        self.add_memory(user_input=query, agent_response=response_content, timestamp=get_timestamp())
-
-        return response_content
-
-    # --- Helper/Maintenance methods (optional additions) ---
-    def get_user_profile_summary(self) -> str:
-        return self.user_long_term_memory.get_raw_user_profile(self.user_id)
-
-    def get_assistant_knowledge_summary(self) -> list:
-        return self.assistant_long_term_memory.get_assistant_knowledge()
-
-    def force_mid_term_analysis(self):
-        """Forces analysis of all unanalyzed pages in the hottest mid-term segment if its heat is above 0.
-        Useful for testing or manual triggering.
-        """
-        original_threshold = self.mid_term_heat_threshold
-        self.mid_term_heat_threshold = 0.0 # Temporarily lower the threshold
-        print("Memoryos: Force-triggering mid-term analysis...")
-        self._trigger_profile_and_knowledge_update_if_needed()
-        self.mid_term_heat_threshold = original_threshold # Restore the original threshold
-
-    def __repr__(self):
-        return f"<Memoryos user_id={self.user_id}>"
diff --git a/memoryos-pypi/mid_term.py b/memoryos-pypi/mid_term.py
deleted file mode 100644
index 4147112..0000000
--- a/memoryos-pypi/mid_term.py
+++ /dev/null
@@ -1,370 +0,0 @@
-import json
-import numpy as np
-from collections import defaultdict
-import faiss
-import heapq
-from datetime import datetime
-
-try:
-    from .utils import (
-        get_timestamp, generate_id, get_embedding, normalize_vector,
-        llm_extract_keywords, compute_time_decay, ensure_directory_exists, OpenAIClient
-    )
-except ImportError:
-    from utils import (
-        get_timestamp, generate_id, get_embedding, normalize_vector,
-        llm_extract_keywords, compute_time_decay, ensure_directory_exists, OpenAIClient
-    )
-
-# Heat computation constants (can be tuned or made configurable)
-HEAT_ALPHA = 1.0
-HEAT_BETA = 1.0
-HEAT_GAMMA = 1.0
-RECENCY_TAU_HOURS = 24 # For R_recency calculation in compute_segment_heat
-
-def compute_segment_heat(session, alpha=HEAT_ALPHA, beta=HEAT_BETA, gamma=HEAT_GAMMA, tau_hours=RECENCY_TAU_HOURS):
-    N_visit = session.get("N_visit", 0)
-    L_interaction = session.get("L_interaction", 0)
-
-    # Calculate recency based on last_visit_time
-    R_recency = 1.0 # Default if no last_visit_time
-    if session.get("last_visit_time"):
-        R_recency = compute_time_decay(session["last_visit_time"], get_timestamp(), tau_hours)
-
-    session["R_recency"] = R_recency # Update the session's recency factor
-    return alpha * N_visit + beta * L_interaction + gamma * R_recency
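For orientation, a worked example of the heat formula above, with illustrative numbers:

# With the default weights (alpha = beta = gamma = 1.0), a session visited
# 3 times, holding 10 pages, and last touched 12 hours ago (tau_hours = 24)
# scores:
#     R_recency = exp(-12 / 24) ≈ 0.61
#     H_segment = 1.0 * 3 + 1.0 * 10 + 1.0 * 0.61 ≈ 13.61
# A brand-new session starts at 0 + 0 + 1.0 = 1.0, so heat is dominated by
# visit count and interaction length once a session is in active use.

-
-class MidTermMemory:
-    def __init__(self, file_path: str, client: OpenAIClient,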
-                 max_capacity=2000):
-        self.file_path = file_path
-        ensure_directory_exists(self.file_path)
-        self.client = client
-        self.max_capacity = max_capacity
-        self.sessions = {} # {session_id: session_object}
-        self.access_frequency = defaultdict(int) # {session_id: access_count_for_lfu}
-        self.heap = [] # Min-heap storing (-H_segment, session_id) for hottest segments
-        self.load()
-
-    def get_page_by_id(self, page_id):
-        for session in self.sessions.values():
-            for page in session.get("details", []):
-                if page.get("page_id") == page_id:
-                    return page
-        return None
-
-    def update_page_connections(self, prev_page_id, next_page_id):
-        if prev_page_id:
-            prev_page = self.get_page_by_id(prev_page_id)
-            if prev_page:
-                prev_page["next_page"] = next_page_id
-        if next_page_id:
-            next_page = self.get_page_by_id(next_page_id)
-            if next_page:
-                next_page["pre_page"] = prev_page_id
-        # self.save() # Avoid saving on every minor update; save at higher-level operations
-
-    def evict_lfu(self):
-        if not self.access_frequency or not self.sessions:
-            return
-
-        lfu_sid = min(self.access_frequency, key=self.access_frequency.get)
-        print(f"MidTermMemory: LFU eviction. Session {lfu_sid} has lowest access frequency.")
-
-        if lfu_sid not in self.sessions:
-            del self.access_frequency[lfu_sid] # Clean up access frequency if the session is already gone
-            self.rebuild_heap()
-            return
-
-        session_to_delete = self.sessions.pop(lfu_sid) # Remove from sessions
-        del self.access_frequency[lfu_sid] # Remove from LFU tracking
-
-        # Clean up page connections if this session's pages were linked.
-        # If a page from this session was linked to an external page, the external link would need
-        # nullifying; this should not occur while connections stay within a session. A more robust
-        # implementation would search all other sessions if inter-session linking were allowed.
-        # For now, assume internal consistency or that the MemoryOS class manages higher-level links.
-        for page in session_to_delete.get("details", []):
-            prev_page_id = page.get("pre_page")
-            next_page_id = page.get("next_page")
-            if prev_page_id and not self.get_page_by_id(prev_page_id): # Check if the previous page is still in memory
-                pass
-            if next_page_id and not self.get_page_by_id(next_page_id):
-                pass
-
-        self.rebuild_heap()
-        self.save()
-        print(f"MidTermMemory: Evicted session {lfu_sid}.")
-
-    def add_session(self, summary, details):
-        session_id = generate_id("session")
-        summary_vec = get_embedding(summary)
-        summary_vec = normalize_vector(summary_vec).tolist()
-        summary_keywords = list(llm_extract_keywords(summary, client=self.client))
-
-        processed_details = []
-        for page_data in details:
-            page_id = page_data.get("page_id", generate_id("page"))
-
-            # Reuse an existing embedding if present, to avoid recomputation
-            if "page_embedding" in page_data and page_data["page_embedding"]:
-                print(f"MidTermMemory: Reusing existing embedding for page {page_id}")
-                inp_vec = page_data["page_embedding"]
-                # Make sure the embedding is normalized
-                if isinstance(inp_vec, list):
-                    inp_vec_np = np.array(inp_vec, dtype=np.float32)
-                    if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Re-normalize only if clearly off unit length
-                        inp_vec = normalize_vector(inp_vec_np).tolist()
-            else:
-                print(f"MidTermMemory: Computing new embedding for page {page_id}")
-                full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
-                inp_vec = get_embedding(full_text)
-                inp_vec = normalize_vector(inp_vec).tolist()
-
-            # Reuse existing keywords if present, to avoid recomputation
-            if "page_keywords" in page_data and page_data["page_keywords"]:
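# Note: the norm check above treats a stored vector as effectively unit length
# when its L2 norm falls within (0.9, 1.1); e.g. a stored norm of 0.97 passes
# through unchanged, while a raw embedding with norm 1.5 is re-normalized
# before it is used for inner-product similarity.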
print(f"MidTermMemory: Reusing existing keywords for page {page_id}") - page_keywords = page_data["page_keywords"] - else: - print(f"MidTermMemory: Computing new keywords for page {page_id}") - full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" - page_keywords = list(llm_extract_keywords(full_text, client=self.client)) - - processed_page = { - **page_data, # Carry over existing fields like user_input, agent_response, timestamp - "page_id": page_id, - "page_embedding": inp_vec, - "page_keywords": page_keywords, - "preloaded": page_data.get("preloaded", False), # Preserve if passed - "analyzed": page_data.get("analyzed", False), # Preserve if passed - # pre_page, next_page, meta_info are handled by DynamicUpdater - } - processed_details.append(processed_page) - - current_ts = get_timestamp() - session_obj = { - "id": session_id, - "summary": summary, - "summary_keywords": summary_keywords, - "summary_embedding": summary_vec, - "details": processed_details, - "L_interaction": len(processed_details), - "R_recency": 1.0, # Initial recency - "N_visit": 0, - "H_segment": 0.0, # Initial heat, will be computed - "timestamp": current_ts, # Creation timestamp - "last_visit_time": current_ts, # Also initial last_visit_time for recency calc - "access_count_lfu": 0 # For LFU eviction policy - } - session_obj["H_segment"] = compute_segment_heat(session_obj) - self.sessions[session_id] = session_obj - self.access_frequency[session_id] = 0 # Initialize for LFU - heapq.heappush(self.heap, (-session_obj["H_segment"], session_id)) # Use negative heat for max-heap behavior - - print(f"MidTermMemory: Added new session {session_id}. Initial heat: {session_obj['H_segment']:.2f}.") - if len(self.sessions) > self.max_capacity: - self.evict_lfu() - self.save() - return session_id - - def rebuild_heap(self): - self.heap = [] - for sid, session_data in self.sessions.items(): - # Ensure H_segment is up-to-date before rebuilding heap if necessary - # session_data["H_segment"] = compute_segment_heat(session_data) - heapq.heappush(self.heap, (-session_data["H_segment"], sid)) - # heapq.heapify(self.heap) # Not needed if pushing one by one - # No save here, it's an internal operation often followed by other ops that save - - def insert_pages_into_session(self, summary_for_new_pages, keywords_for_new_pages, pages_to_insert, - similarity_threshold=0.6, keyword_similarity_alpha=1.0): - if not self.sessions: # If no existing sessions, just add as a new one - print("MidTermMemory: No existing sessions. 
Adding new session directly.")
-            return self.add_session(summary_for_new_pages, pages_to_insert)
-
-        new_summary_vec = get_embedding(summary_for_new_pages)
-        new_summary_vec = normalize_vector(new_summary_vec)
-
-        best_sid = None
-        best_overall_score = -1
-
-        for sid, existing_session in self.sessions.items():
-            existing_summary_vec = np.array(existing_session["summary_embedding"], dtype=np.float32)
-            semantic_sim = float(np.dot(existing_summary_vec, new_summary_vec))
-
-            # Keyword similarity (Jaccard index based)
-            existing_keywords = set(existing_session.get("summary_keywords", []))
-            new_keywords_set = set(keywords_for_new_pages)
-            s_topic_keywords = 0
-            if existing_keywords and new_keywords_set:
-                intersection = len(existing_keywords.intersection(new_keywords_set))
-                union = len(existing_keywords.union(new_keywords_set))
-                if union > 0:
-                    s_topic_keywords = intersection / union
-
-            overall_score = semantic_sim + keyword_similarity_alpha * s_topic_keywords
-
-            if overall_score > best_overall_score:
-                best_overall_score = overall_score
-                best_sid = sid
-
-        if best_sid and best_overall_score >= similarity_threshold:
-            print(f"MidTermMemory: Merging pages into session {best_sid}. Score: {best_overall_score:.2f} (Threshold: {similarity_threshold})")
-            target_session = self.sessions[best_sid]
-
-            processed_new_pages = []
-            for page_data in pages_to_insert:
-                page_id = page_data.get("page_id", generate_id("page")) # Use an existing ID or generate a new one
-
-                # Reuse an existing embedding if present, to avoid recomputation
-                if "page_embedding" in page_data and page_data["page_embedding"]:
-                    print(f"MidTermMemory: Reusing existing embedding for page {page_id}")
-                    inp_vec = page_data["page_embedding"]
-                    # Make sure the embedding is normalized
-                    if isinstance(inp_vec, list):
-                        inp_vec_np = np.array(inp_vec, dtype=np.float32)
-                        if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Re-normalize only if clearly off unit length
-                            inp_vec = normalize_vector(inp_vec_np).tolist()
-                else:
-                    print(f"MidTermMemory: Computing new embedding for page {page_id}")
-                    full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
-                    inp_vec = get_embedding(full_text)
-                    inp_vec = normalize_vector(inp_vec).tolist()
-
-                # Reuse existing keywords if present, to avoid recomputation
-                if "page_keywords" in page_data and page_data["page_keywords"]:
-                    print(f"MidTermMemory: Reusing existing keywords for page {page_id}")
-                    page_keywords_current = page_data["page_keywords"]
-                else:
-                    print(f"MidTermMemory: Computing new keywords for page {page_id}")
-                    full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
-                    page_keywords_current = list(llm_extract_keywords(full_text, client=self.client))
-
-                processed_page = {
-                    **page_data, # Carry over existing fields
-                    "page_id": page_id,
-                    "page_embedding": inp_vec,
-                    "page_keywords": page_keywords_current,
-                    # analyzed, preloaded flags should be part of page_data if set
-                }
-                target_session["details"].append(processed_page)
-                processed_new_pages.append(processed_page)
-
-            target_session["L_interaction"] += len(pages_to_insert)
-            target_session["last_visit_time"] = get_timestamp() # Update last visit time on modification
-            target_session["H_segment"] = compute_segment_heat(target_session)
-            self.rebuild_heap() # Rebuild heap as heat has changed
-            self.save()
-            return best_sid
-        else:
-            print(f"MidTermMemory: No suitable session to merge (best score {best_overall_score:.2f} < threshold {similarity_threshold}). 
Creating new session.") - return self.add_session(summary_for_new_pages, pages_to_insert) - - def search_sessions(self, query_text, segment_similarity_threshold=0.1, page_similarity_threshold=0.1, - top_k_sessions=5, keyword_alpha=1.0, recency_tau_search=3600): - if not self.sessions: - return [] - - query_vec = get_embedding(query_text) - query_vec = normalize_vector(query_vec) - query_keywords = set(llm_extract_keywords(query_text, client=self.client)) - - candidate_sessions = [] - session_ids = list(self.sessions.keys()) - if not session_ids: return [] - - summary_embeddings_list = [self.sessions[s]["summary_embedding"] for s in session_ids] - summary_embeddings_np = np.array(summary_embeddings_list, dtype=np.float32) - - dim = summary_embeddings_np.shape[1] - index = faiss.IndexFlatIP(dim) # Inner product for similarity - index.add(summary_embeddings_np) - - query_arr_np = np.array([query_vec], dtype=np.float32) - distances, indices = index.search(query_arr_np, min(top_k_sessions, len(session_ids))) - - results = [] - current_time_str = get_timestamp() - - for i, idx in enumerate(indices[0]): - if idx == -1: continue - - session_id = session_ids[idx] - session = self.sessions[session_id] - semantic_sim_score = float(distances[0][i]) # This is the dot product - - # Keyword similarity for session summary - session_keywords = set(session.get("summary_keywords", [])) - s_topic_keywords = 0 - if query_keywords and session_keywords: - intersection = len(query_keywords.intersection(session_keywords)) - union = len(query_keywords.union(session_keywords)) - if union > 0: s_topic_keywords = intersection / union - - # Time decay for session recency in search scoring - # time_decay_factor = compute_time_decay(session["timestamp"], current_time_str, tau_hours=recency_tau_search) - - # Combined score for session relevance - session_relevance_score = (semantic_sim_score + keyword_alpha * s_topic_keywords) - - if session_relevance_score >= segment_similarity_threshold: - matched_pages_in_session = [] - for page in session.get("details", []): - page_embedding = np.array(page["page_embedding"], dtype=np.float32) - # page_keywords = set(page.get("page_keywords", [])) - - page_sim_score = float(np.dot(page_embedding, query_vec)) - # Can also add keyword sim for pages if needed, but keeping it simpler for now - - if page_sim_score >= page_similarity_threshold: - matched_pages_in_session.append({"page_data": page, "score": page_sim_score}) - - if matched_pages_in_session: - # Update session access stats - session["N_visit"] += 1 - session["last_visit_time"] = current_time_str - session["access_count_lfu"] = session.get("access_count_lfu", 0) + 1 - self.access_frequency[session_id] = session["access_count_lfu"] - session["H_segment"] = compute_segment_heat(session) - self.rebuild_heap() # Heat changed - - results.append({ - "session_id": session_id, - "session_summary": session["summary"], - "session_relevance_score": session_relevance_score, - "matched_pages": sorted(matched_pages_in_session, key=lambda x: x["score"], reverse=True) # Sort pages by score - }) - - self.save() # Save changes from access updates - # Sort final results by session_relevance_score - return sorted(results, key=lambda x: x["session_relevance_score"], reverse=True) - - def save(self): - # Make a copy for saving to avoid modifying heap during iteration if it happens - # Though current heap is list of tuples, so direct modification risk is low - # sessions_to_save = {sid: data for sid, data in self.sessions.items()} - data_to_save = { 
- "sessions": self.sessions, - "access_frequency": dict(self.access_frequency), # Convert defaultdict to dict for JSON - # Heap is derived, no need to save typically, but can if desired for faster load - # "heap_snapshot": self.heap - } - try: - with open(self.file_path, "w", encoding="utf-8") as f: - json.dump(data_to_save, f, ensure_ascii=False, indent=2) - except IOError as e: - print(f"Error saving MidTermMemory to {self.file_path}: {e}") - - def load(self): - try: - with open(self.file_path, "r", encoding="utf-8") as f: - data = json.load(f) - self.sessions = data.get("sessions", {}) - self.access_frequency = defaultdict(int, data.get("access_frequency", {})) - self.rebuild_heap() # Rebuild heap from loaded sessions - print(f"MidTermMemory: Loaded from {self.file_path}. Sessions: {len(self.sessions)}.") - except FileNotFoundError: - print(f"MidTermMemory: No history file found at {self.file_path}. Initializing new memory.") - except json.JSONDecodeError: - print(f"MidTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") - except Exception as e: - print(f"MidTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/prompts.py b/memoryos-pypi/prompts.py deleted file mode 100644 index 46d68fb..0000000 --- a/memoryos-pypi/prompts.py +++ /dev/null @@ -1,235 +0,0 @@ -""" -This file stores all the prompts used by the Memoryos system. -""" - -# Prompt for generating system response (from main_memoybank.py, generate_system_response_with_meta) -GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT = ( - "As a communication expert with outstanding communication habits, you embody the role of {relationship} throughout the following dialogues.\n" - "Here are some of your distinctive personal traits and knowledge:\n{assistant_knowledge_text}\n" - "User's profile:\n" - "{meta_data_text}\n" - "Your task is to generate responses that align with these traits and maintain the tone.\n" -) - -GENERATE_SYSTEM_RESPONSE_USER_PROMPT = ( - "\n" - "Drawing from your recent conversation with the user:\n" - "{history_text}\n\n" - "\n" - "The memories linked to the ongoing conversation are:\n" - "{retrieval_text}\n\n" - "\n" - "During the conversation process between you and the user in the past, you found that the user has the following characteristics:\n" - "{background}\n\n" - "Now, please role-play as {relationship} to continue the dialogue between you and the user.\n" - "The user just said: {query}\n" - "Please respond to the user's statement using the following format (maximum 30 words, must be in English):\n " - "When answering questions, be sure to check whether the timestamp of the referenced information matches the timeframe of the question" -) - -# Prompt for assistant knowledge extraction (from utils.py, analyze_assistant_knowledge) -ASSISTANT_KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are an assistant knowledge extraction engine. Rules: -1. Extract ONLY explicit statements about the assistant's identity or knowledge. -2. Use concise and factual statements in the first person. -3. If no relevant information is found, output "None".""" - -ASSISTANT_KNOWLEDGE_EXTRACTION_USER_PROMPT = """ -# Assistant Knowledge Extraction Task -Analyze the conversation and extract any fact or identity traits about the assistant. -If no traits can be extracted, reply with "None". Use the following format for output: -The generated content should be as concise as possible — the more concise, the better. 
-【Assistant Knowledge】 -- [Fact 1] -- [Fact 2] -- (Or "None" if none found) - -Few-shot examples: -1. User: Can you recommend some movies. - AI: Yes, I recommend Interstellar. - Time: 2023-10-01 - 【Assistant Knowledge】 - - I recommend Interstellar on 2023-10-01. - -2. User: Can you help me with cooking recipes? - AI: Yes, I have extensive knowledge of cooking recipes and techniques. - Time: 2023-10-02 - 【Assistant Knowledge】 - - I have cooking recipes and techniques on 2023-10-02. - -3. User: That's interesting. I didn't know you could do that. - AI: I'm glad you find it interesting! - 【Assistant Knowledge】 - - None - -Conversation: -{conversation} -""" - -# Prompt for summarizing dialogs (from utils.py, gpt_summarize) -SUMMARIZE_DIALOGS_SYSTEM_PROMPT = "You are an expert in summarizing dialogue topics. Generate extremely concise and precise summaries. Be as brief as possible while capturing the essence." -SUMMARIZE_DIALOGS_USER_PROMPT = "Please generate an concise topic summary based on the following conversation. Keep it to 2-3 short sentences maximum:\n{dialog_text}\nConcise Summary:" - -# Prompt for multi-summary generation (from utils.py, gpt_generate_multi_summary) -MULTI_SUMMARY_SYSTEM_PROMPT = "You are an expert in analyzing dialogue topics. Generate concise summaries. No more than two topics. Be as brief as possible." -MULTI_SUMMARY_USER_PROMPT = ("Please analyze the following dialogue and generate extremely concise subtopic summaries (if applicable), with a maximum of two themes.\n" - "Each summary should be very brief - just a few words for the theme and content. Format as JSON array:\n" - "[\n {{\"theme\": \"Brief theme\", \"keywords\": [\"key1\", \"key2\"], \"content\": \"summary\"}}\n]\n" - "\nConversation content:\n{text}") - -# Prompt for personality analysis (NEW TEMPLATE) -PERSONALITY_ANALYSIS_SYSTEM_PROMPT = """You are a professional user preference analysis assistant. Your task is to analyze the user's personality preferences from the given dialogue based on the provided dimensions. - -For each dimension: -1. Carefully read the conversation and determine if the dimension is reflected. -2. If reflected, determine the user's preference level: High / Medium / Low, and briefly explain the reasoning, including time, people, and context if possible. -3. If the dimension is not reflected, do not extract or list it. - -Focus only on the user's preferences and traits for the personality analysis section. -Output only the user profile section. -""" - -PERSONALITY_ANALYSIS_USER_PROMPT = """Please analyze the latest user-AI conversation below and update the user profile based on the 90 personality preference dimensions. - -Here are the 90 dimensions and their explanations: - -[Psychological Model (Basic Needs & Personality)] -Extraversion: Preference for social activities. -Openness: Willingness to embrace new ideas and experiences. -Agreeableness: Tendency to be friendly and cooperative. -Conscientiousness: Responsibility and organizational ability. -Neuroticism: Emotional stability and sensitivity. -Physiological Needs: Concern for comfort and basic needs. -Need for Security: Emphasis on safety and stability. -Need for Belonging: Desire for group affiliation. -Need for Self-Esteem: Need for respect and recognition. -Cognitive Needs: Desire for knowledge and understanding. -Aesthetic Appreciation: Appreciation for beauty and art. -Self-Actualization: Pursuit of one's full potential. -Need for Order: Preference for cleanliness and organization. 
-Need for Autonomy: Preference for independent decision-making and action. -Need for Power: Desire to influence or control others. -Need for Achievement: Value placed on accomplishments. - -[AI Alignment Dimensions] -Helpfulness: Whether the AI's response is practically useful to the user. (This reflects user's expectation of AI) -Honesty: Whether the AI's response is truthful. (This reflects user's expectation of AI) -Safety: Avoidance of sensitive or harmful content. (This reflects user's expectation of AI) -Instruction Compliance: Strict adherence to user instructions. (This reflects user's expectation of AI) -Truthfulness: Accuracy and authenticity of content. (This reflects user's expectation of AI) -Coherence: Clarity and logical consistency of expression. (This reflects user's expectation of AI) -Complexity: Preference for detailed and complex information. -Conciseness: Preference for brief and clear responses. - -[Content Platform Interest Tags] -Science Interest: Interest in science topics. -Education Interest: Concern with education and learning. -Psychology Interest: Interest in psychology topics. -Family Concern: Interest in family and parenting. -Fashion Interest: Interest in fashion topics. -Art Interest: Engagement with or interest in art. -Health Concern: Concern with physical health and lifestyle. -Financial Management Interest: Interest in finance and budgeting. -Sports Interest: Interest in sports and physical activity. -Food Interest: Passion for cooking and cuisine. -Travel Interest: Interest in traveling and exploring new places. -Music Interest: Interest in music appreciation or creation. -Literature Interest: Interest in literature and reading. -Film Interest: Interest in movies and cinema. -Social Media Activity: Frequency and engagement with social media. -Tech Interest: Interest in technology and innovation. -Environmental Concern: Attention to environmental and sustainability issues. -History Interest: Interest in historical knowledge and topics. -Political Concern: Interest in political and social issues. -Religious Interest: Interest in religion and spirituality. -Gaming Interest: Enjoyment of video games or board games. -Animal Concern: Concern for animals or pets. -Emotional Expression: Preference for direct vs. restrained emotional expression. -Sense of Humor: Preference for humorous or serious communication style. -Information Density: Preference for detailed vs. concise information. -Language Style: Preference for formal vs. casual tone. -Practicality: Preference for practical advice vs. theoretical discussion. - -**Task Instructions:** -1. Review the existing user profile below -2. Analyze the new conversation for evidence of the 90 dimensions above -3. Update and integrate the findings into a comprehensive user profile -4. For each dimension that can be identified, use the format: Dimension ( Level(High/Medium/Low) ) -5. Include brief reasoning for each dimension when possible -6. Maintain existing insights from the old profile while incorporating new observations -7. 
If a dimension cannot be inferred from either the old profile or new conversation, do not include it - -**Existing User Profile:** -{existing_user_profile} - -**Latest User-AI Conversation:** -{conversation} - -**Updated User Profile:** -Please provide the comprehensive updated user profile below, combining insights from both the existing profile and new conversation:""" - -# Prompt for knowledge extraction (NEW) -KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are a knowledge extraction assistant. Your task is to extract user private data and assistant knowledge from conversations. - -Focus on: -1. User private data: personal information, preferences, or private facts about the user -2. Assistant knowledge: explicit statements about what the assistant did, provided, or demonstrated - -Be extremely concise and factual in your extractions. Use the shortest possible phrases. -""" - -KNOWLEDGE_EXTRACTION_USER_PROMPT = """Please extract user private data and assistant knowledge from the latest user-AI conversation below. - -Latest User-AI Conversation: -{conversation} - -【User Private Data】 -Extract personal information about the user. Be extremely concise - use shortest possible phrases: -- [Brief fact]: [Minimal context(Including entities and time)] -- [Brief fact]: [Minimal context(Including entities and time)] -- (If no private data found, write "None") - -【Assistant Knowledge】 -Extract what the assistant demonstrated. Use format "Assistant [action] at [time]". Be extremely brief: -- Assistant [brief action] at [time/context] -- Assistant [brief capability] during [brief context] -- (If no assistant knowledge found, write "None") -""" - -# Prompt for updating user profile (from utils.py, gpt_update_profile) -UPDATE_PROFILE_SYSTEM_PROMPT = "You are an expert in merging and updating user profiles. Integrate the new information into the old profile, maintaining consistency and improving the overall understanding of the user. Avoid redundancy. The new analysis is based on specific dimensions, try to incorporate these insights meaningfully." -UPDATE_PROFILE_USER_PROMPT = "Please update the following user profile based on the new analysis. If the old profile is empty or \"None\", create a new one based on the new analysis.\n\nOld User Profile:\n{old_profile}\n\nNew Analysis Data:\n{new_analysis}\n\nUpdated User Profile:" - -# Prompt for extracting theme (from utils.py, gpt_extract_theme) -EXTRACT_THEME_SYSTEM_PROMPT = "You are an expert in extracting the main theme from a text. Provide a concise theme." -EXTRACT_THEME_USER_PROMPT = "Please extract the main theme from the following text:\n{answer_text}\n\nTheme:" - -# Prompt for extracting keywords (from utils.py, llm_extract_keywords) -EXTRACT_KEYWORDS_SYSTEM_PROMPT = "You are an expert in keyword extraction. Extract only the most essential keywords from the text. Return 3-5 keywords maximum as a comma-separated list. Be extremely selective." -EXTRACT_KEYWORDS_USER_PROMPT = "Please extract the 3-5 most important keywords from the following text. Be very selective and concise:\n{text}\n\nKeywords:" - -# Prompt for conversation continuity check (from dynamic_update.py, _is_conversation_continuing) -CONTINUITY_CHECK_SYSTEM_PROMPT = "You are a conversation continuity detector. Return ONLY 'true' or 'false'." 
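As an orientation for how these templates are consumed, here is a minimal, illustrative sketch of the continuity check; `CONTINUITY_CHECK_USER_PROMPT` is defined just below, `client` is a utils.OpenAIClient, and the sample dialogue strings and model name are invented for the example (the updater's check_conversation_continuity helper in utils.py presumably wraps the same calls):

# Illustrative sketch only; not part of the deleted file.
messages = [
    {"role": "system", "content": CONTINUITY_CHECK_SYSTEM_PROMPT},
    {"role": "user", "content": CONTINUITY_CHECK_USER_PROMPT.format(
        prev_user="How do I index vectors?", prev_agent="Use faiss.IndexFlatIP.",
        curr_user="And on a GPU?", curr_agent="Install faiss-gpu and move the index.",
    )},
]
answer = client.chat_completion(model="gpt-4o-mini", messages=messages,
                                temperature=0.0, max_tokens=5)
is_continuous = answer.strip().lower() == "true"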
-CONTINUITY_CHECK_USER_PROMPT = ("Determine if these two conversation pages are continuous (true continuation without topic shift).\n" - "Return ONLY \"true\" or \"false\".\n\n" - "Previous Page:\nUser: {prev_user}\nAssistant: {prev_agent}\n\n" - "Current Page:\nUser: {curr_user}\nAssistant: {curr_agent}\n\n" - "Continuous?") - -# Prompt for generating meta info (from dynamic_update.py, _generate_meta_info) -META_INFO_SYSTEM_PROMPT = ("""You are a conversation meta-summary updater. Your task is to: -1. Preserve relevant context from previous meta-summary -2. Integrate new information from current dialogue -3. Output ONLY the updated summary (no explanations)""" ) -META_INFO_USER_PROMPT = ("""Update the conversation meta-summary by incorporating the new dialogue while maintaining continuity. - - Guidelines: - 1. Start from the previous meta-summary (if exists) - 2. Add/update information based on the new dialogue - 3. Keep it concise (1-2 sentences max) - 4. Maintain context coherence - - Previous Meta-summary: {last_meta} - New Dialogue: - {new_dialogue} - - Updated Meta-summary:""") \ No newline at end of file diff --git a/memoryos-pypi/requirements.txt b/memoryos-pypi/requirements.txt deleted file mode 100644 index f95b261..0000000 --- a/memoryos-pypi/requirements.txt +++ /dev/null @@ -1,21 +0,0 @@ -# MemoryOS Core Dependencies -# Core scientific computing and ML libraries -numpy==1.24.* -sentence-transformers>=2.2.0,<3.0.0 - -faiss-gpu>=1.7.0,<2.0.0 - -openai -# Web framework (for demo) -flask>=2.0.0,<3.0.0 - -# Optional utilities -python-dotenv>=0.19.0,<2.0.0 - -# Development and testing (optional) -# pytest>=7.0.0,<8.0.0 -# pytest-asyncio>=0.20.0,<1.0.0 - -# Additional dependencies for compatibility -typing-extensions>=4.0.0,<5.0.0 -regex>=2022.1.18 diff --git a/memoryos-pypi/retriever.py b/memoryos-pypi/retriever.py deleted file mode 100644 index 1ea1568..0000000 --- a/memoryos-pypi/retriever.py +++ /dev/null @@ -1,131 +0,0 @@ -from collections import deque -import heapq -from concurrent.futures import ThreadPoolExecutor, as_completed -from typing import Optional - -try: - from .utils import get_timestamp, OpenAIClient, run_parallel_tasks - from .short_term import ShortTermMemory - from .mid_term import MidTermMemory - from .long_term import LongTermMemory -except ImportError: - from utils import get_timestamp, OpenAIClient, run_parallel_tasks - from short_term import ShortTermMemory - from mid_term import MidTermMemory - from long_term import LongTermMemory -# from .updater import Updater # Updater is not directly used by Retriever - -class Retriever: - def __init__(self, - mid_term_memory: MidTermMemory, - long_term_memory: LongTermMemory, - assistant_long_term_memory: Optional[LongTermMemory] = None, # Add assistant LTM - # client: OpenAIClient, # Not strictly needed if all LLM calls are within memory modules - queue_capacity=7): # Default from main_memoybank was 7 for retrieval_queue - # Short term memory is usually for direct context, not primary retrieval source here - # self.short_term_memory = short_term_memory - self.mid_term_memory = mid_term_memory - self.long_term_memory = long_term_memory - self.assistant_long_term_memory = assistant_long_term_memory # Store assistant LTM reference - # self.client = client - self.retrieval_queue_capacity = queue_capacity - # self.retrieval_queue = deque(maxlen=queue_capacity) # This was instance level, but retrieve returns it, so maybe not needed as instance var - - def _retrieve_mid_term_context(self, user_query, 
segment_similarity_threshold, page_similarity_threshold, top_k_sessions):
-        """Parallel task: retrieve context from mid-term memory."""
-        print("Retriever: Searching mid-term memory...")
-        matched_sessions = self.mid_term_memory.search_sessions(
-            query_text=user_query,
-            segment_similarity_threshold=segment_similarity_threshold,
-            page_similarity_threshold=page_similarity_threshold,
-            top_k_sessions=top_k_sessions
-        )
-
-        # Use a heap to get the top N pages across all relevant sessions based on their scores
-        top_pages_heap = []
-        page_counter = 0 # Counter to ensure unique comparison when scores tie
-        for session_match in matched_sessions:
-            for page_match in session_match.get("matched_pages", []):
-                page_data = page_match["page_data"]
-                page_score = page_match["score"] # Using the page score directly
-
-                # The page score could also be combined with the session relevance score,
-                # e.g. page_score * session_match["session_relevance_score"]; for now, use page_score alone.
-                combined_score = page_score
-
-                if len(top_pages_heap) < self.retrieval_queue_capacity:
-                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
-                    page_counter += 1
-                elif combined_score > top_pages_heap[0][0]: # If the current page is better than the worst in the heap
-                    heapq.heappop(top_pages_heap)
-                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
-                    page_counter += 1
-
-        # Extract pages from the min-heap; sort descending so the highest-scoring pages come first
-        retrieved_pages = [item[2] for item in sorted(top_pages_heap, key=lambda x: x[0], reverse=True)]
-        print(f"Retriever: Mid-term memory recalled {len(retrieved_pages)} pages.")
-        return retrieved_pages
-
-    def _retrieve_user_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
-        """Parallel task: retrieve from the user's long-term knowledge."""
-        print("Retriever: Searching user long-term knowledge...")
-        retrieved_knowledge = self.long_term_memory.search_user_knowledge(
-            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
-        )
-        print(f"Retriever: Long-term user knowledge recalled {len(retrieved_knowledge)} items.")
-        return retrieved_knowledge
-
-    def _retrieve_assistant_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
-        """Parallel task: retrieve from the assistant's long-term knowledge."""
-        if not self.assistant_long_term_memory:
-            print("Retriever: No assistant long-term memory provided, skipping assistant knowledge retrieval.")
-            return []
-
-        print("Retriever: Searching assistant long-term knowledge...")
-        retrieved_knowledge = self.assistant_long_term_memory.search_assistant_knowledge(
-            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
-        )
-        print(f"Retriever: Long-term assistant knowledge recalled {len(retrieved_knowledge)} items.")
-        return retrieved_knowledge
-
-    def retrieve_context(self, user_query: str,
-                         user_id: str, # Needed for the profile; can also be used for context filtering if desired
-                         segment_similarity_threshold=0.1, # From the main_memoybank example
-                         page_similarity_threshold=0.1, # From the main_memoybank example
-                         knowledge_threshold=0.01, # From the main_memoybank example
-                         top_k_sessions=5, # From the MidTermMemory search default
-                         top_k_knowledge=20 # Default for knowledge search
-                         ):
-        print(f"Retriever: Starting PARALLEL retrieval for query: '{user_query[:50]}...'")
-
-        # Run the three retrieval tasks in parallel
-        tasks = [
-            lambda: self._retrieve_mid_term_context(user_query, segment_similarity_threshold, page_similarity_threshold, top_k_sessions),
-            lambda: self._retrieve_user_knowledge(user_query, knowledge_threshold,
top_k_knowledge), - lambda: self._retrieve_assistant_knowledge(user_query, knowledge_threshold, top_k_knowledge) - ] - - # 使用并行处理 - with ThreadPoolExecutor(max_workers=3) as executor: - futures = [] - for i, task in enumerate(tasks): - future = executor.submit(task) - futures.append((i, future)) - - results = [None] * 3 - for task_idx, future in futures: - try: - results[task_idx] = future.result() - except Exception as e: - print(f"Error in retrieval task {task_idx}: {e}") - results[task_idx] = [] - - retrieved_mid_term_pages, retrieved_user_knowledge, retrieved_assistant_knowledge = results - - return { - "retrieved_pages": retrieved_mid_term_pages or [], # List of page dicts - "retrieved_user_knowledge": retrieved_user_knowledge or [], # List of knowledge entry dicts - "retrieved_assistant_knowledge": retrieved_assistant_knowledge or [], # List of assistant knowledge entry dicts - "retrieved_at": get_timestamp() - } \ No newline at end of file diff --git a/memoryos-pypi/short_term.py b/memoryos-pypi/short_term.py deleted file mode 100644 index 37ffddc..0000000 --- a/memoryos-pypi/short_term.py +++ /dev/null @@ -1,64 +0,0 @@ -import json -from collections import deque -try: - from .utils import get_timestamp, ensure_directory_exists -except ImportError: - from utils import get_timestamp, ensure_directory_exists - -class ShortTermMemory: - def __init__(self, file_path, max_capacity=10): - self.max_capacity = max_capacity - self.file_path = file_path - ensure_directory_exists(self.file_path) - self.memory = deque(maxlen=max_capacity) - self.load() - - def add_qa_pair(self, qa_pair): - # Ensure timestamp exists, add if not - if 'timestamp' not in qa_pair or not qa_pair['timestamp']: - qa_pair["timestamp"] = get_timestamp() - - self.memory.append(qa_pair) - print(f"ShortTermMemory: Added QA. User: {qa_pair.get('user_input','')[:30]}...") - self.save() - - def get_all(self): - return list(self.memory) - - def is_full(self): - return len(self.memory) >= self.max_capacity # Use >= to be safe - - def pop_oldest(self): - if self.memory: - msg = self.memory.popleft() - print("ShortTermMemory: Evicted oldest QA pair.") - self.save() - return msg - return None - - def save(self): - try: - with open(self.file_path, "w", encoding="utf-8") as f: - json.dump(list(self.memory), f, ensure_ascii=False, indent=2) - except IOError as e: - print(f"Error saving ShortTermMemory to {self.file_path}: {e}") - - def load(self): - try: - with open(self.file_path, "r", encoding="utf-8") as f: - data = json.load(f) - # Ensure items are loaded correctly, especially if file was empty or malformed - if isinstance(data, list): - self.memory = deque(data, maxlen=self.max_capacity) - else: - self.memory = deque(maxlen=self.max_capacity) - print(f"ShortTermMemory: Loaded from {self.file_path}.") - except FileNotFoundError: - self.memory = deque(maxlen=self.max_capacity) - print(f"ShortTermMemory: No history file found at {self.file_path}. Initializing new memory.") - except json.JSONDecodeError: - self.memory = deque(maxlen=self.max_capacity) - print(f"ShortTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") - except Exception as e: - self.memory = deque(maxlen=self.max_capacity) - print(f"ShortTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. 
Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/test.py b/memoryos-pypi/test.py deleted file mode 100644 index 23de687..0000000 --- a/memoryos-pypi/test.py +++ /dev/null @@ -1,55 +0,0 @@ - -import os -from memoryos import Memoryos - -# --- Basic Configuration --- -USER_ID = "demo_user" -ASSISTANT_ID = "demo_assistant" -API_KEY = "" # Replace with your key -BASE_URL = "" # Optional: if using a custom OpenAI endpoint -DATA_STORAGE_PATH = "" -LLM_MODEL = "gpt-4o-mini" - -def simple_demo(): - print("MemoryOS Simple Demo") - - # 1. Initialize MemoryOS - print("Initializing MemoryOS...") - try: - memo = Memoryos( - user_id=USER_ID, - openai_api_key=API_KEY, - openai_base_url=BASE_URL, - data_storage_path=DATA_STORAGE_PATH, - llm_model=LLM_MODEL, - assistant_id=ASSISTANT_ID, - short_term_capacity=7, - mid_term_heat_threshold=5, - retrieval_queue_capacity=10, - long_term_knowledge_capacity=100, - mid_term_similarity_threshold=0.6 - ) - print("MemoryOS initialized successfully!\n") - except Exception as e: - print(f"Error: {e}") - return - - # 2. Add some basic memories - print("Adding some memories...") - - memo.add_memory( - user_input="Hi! I'm Tom, I work as a data scientist in San Francisco.", - agent_response="Hello Tom! Nice to meet you. Data science is such an exciting field. What kind of data do you work with?" - ) - - test_query = "What do you remember about my job?" - print(f"User: {test_query}") - - response = memo.get_response( - query=test_query, - ) - - print(f"Assistant: {response}") - -if __name__ == "__main__": - simple_demo() \ No newline at end of file diff --git a/memoryos-pypi/updater.py b/memoryos-pypi/updater.py deleted file mode 100644 index 4c2da85..0000000 --- a/memoryos-pypi/updater.py +++ /dev/null @@ -1,255 +0,0 @@ -try: - from .utils import ( - generate_id, get_timestamp, - gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient, - llm_extract_keywords, run_parallel_tasks - ) - from .short_term import ShortTermMemory - from .mid_term import MidTermMemory - from .long_term import LongTermMemory -except ImportError: - from utils import ( - generate_id, get_timestamp, - gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient, - llm_extract_keywords, run_parallel_tasks - ) - from short_term import ShortTermMemory - from mid_term import MidTermMemory - from long_term import LongTermMemory - -from concurrent.futures import ThreadPoolExecutor, as_completed - -class Updater: - def __init__(self, - short_term_memory: ShortTermMemory, - mid_term_memory: MidTermMemory, - long_term_memory: LongTermMemory, - client: OpenAIClient, - topic_similarity_threshold=0.5, - llm_model="gpt-4o-mini"): - self.short_term_memory = short_term_memory - self.mid_term_memory = mid_term_memory - self.long_term_memory = long_term_memory - self.client = client - self.topic_similarity_threshold = topic_similarity_threshold - self.last_evicted_page_for_continuity = None # Tracks the actual last page object for continuity checks - self.llm_model = llm_model - - def _process_page_embedding_and_keywords(self, page_data): - """并行处理单个页面的embedding和keywords生成""" - page_id = page_data.get("page_id", generate_id("page")) - - # 检查是否已有embedding和keywords - if "page_embedding" in page_data and page_data["page_embedding"] and \ - "page_keywords" in page_data and page_data["page_keywords"]: - print(f"Updater: Page {page_id} already has embedding and keywords, skipping computation") - return page_data - - 
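# Note: the embedding and the keyword extraction below are independent, so
# they are fanned out to a two-worker thread pool and joined with
# as_completed(); either result may fail independently and is then left None.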
full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" - - # 并行计算embedding和keywords(如果需要) - tasks = [] - if not ("page_embedding" in page_data and page_data["page_embedding"]): - tasks.append(('embedding', lambda: self._get_embedding_for_page(full_text))) - - if not ("page_keywords" in page_data and page_data["page_keywords"]): - tasks.append(('keywords', lambda: llm_extract_keywords(full_text, client=self.client))) - - if tasks: - with ThreadPoolExecutor(max_workers=2) as executor: - futures = {executor.submit(task[1]): task[0] for task in tasks} - results = {} - - for future in as_completed(futures): - task_type = futures[future] - try: - results[task_type] = future.result() - except Exception as e: - print(f"Error in {task_type} computation for page {page_id}: {e}") - results[task_type] = None - - # 更新页面数据 - if 'embedding' in results and results['embedding'] is not None: - from .utils import normalize_vector - page_data["page_embedding"] = normalize_vector(results['embedding']).tolist() - - if 'keywords' in results and results['keywords'] is not None: - page_data["page_keywords"] = list(results['keywords']) - - return page_data - - def _get_embedding_for_page(self, text): - """获取页面embedding的辅助方法""" - from .utils import get_embedding - return get_embedding(text) - - def _update_linked_pages_meta_info(self, start_page_id, new_meta_info): - """ - Updates meta_info for a chain of connected pages starting from start_page_id. - This is a simplified version. Assumes that once a chain is broken (no pre_page), - we don't need to go further back. Updates forward as well. - """ - # Go backward - q = [start_page_id] - visited = {start_page_id} - - head = 0 - while head < len(q): - current_page_id = q[head] - head += 1 - page = self.mid_term_memory.get_page_by_id(current_page_id) - if page: - page["meta_info"] = new_meta_info - # Check previous page - prev_id = page.get("pre_page") - if prev_id and prev_id not in visited: - q.append(prev_id) - visited.add(prev_id) - # Check next page - next_id = page.get("next_page") - if next_id and next_id not in visited: - q.append(next_id) - visited.add(next_id) - if q: # If any pages were updated - self.mid_term_memory.save() # Save mid-term memory after updates - - def process_short_term_to_mid_term(self): - evicted_qas = [] - while self.short_term_memory.is_full(): - qa = self.short_term_memory.pop_oldest() - if qa and qa.get("user_input") and qa.get("agent_response"): - evicted_qas.append(qa) - - if not evicted_qas: - print("Updater: No QAs evicted from short-term memory.") - return - - print(f"Updater: Processing {len(evicted_qas)} QAs from short-term to mid-term.") - - # 1. 
Create page structures and handle continuity within the evicted batch - current_batch_pages = [] - temp_last_page_in_batch = self.last_evicted_page_for_continuity # Carry over from previous batch if any - - for qa_pair in evicted_qas: - current_page_obj = { - "page_id": generate_id("page"), - "user_input": qa_pair.get("user_input", ""), - "agent_response": qa_pair.get("agent_response", ""), - "timestamp": qa_pair.get("timestamp", get_timestamp()), - "preloaded": False, # Default for new pages from short-term - "analyzed": False, # Default for new pages from short-term - "pre_page": None, - "next_page": None, - "meta_info": None - } - - is_continuous = check_conversation_continuity(temp_last_page_in_batch, current_page_obj, self.client, model=self.llm_model) - - if is_continuous and temp_last_page_in_batch: - current_page_obj["pre_page"] = temp_last_page_in_batch["page_id"] - # The actual next_page for temp_last_page_in_batch will be set when it's stored in mid-term - # or if it's already there, it needs an update. This linking is tricky. - # For now, we establish the link from current to previous. - # MidTermMemory's update_page_connections can fix the other side if pages are already there. - - # Meta info generation based on continuity - last_meta = temp_last_page_in_batch.get("meta_info") - new_meta = generate_page_meta_info(last_meta, current_page_obj, self.client, model=self.llm_model) - current_page_obj["meta_info"] = new_meta - # If temp_last_page_in_batch was part of a chain, its meta_info and subsequent ones should update. - # This implies that meta_info should perhaps be updated more globally or propagated. - # For now, new_meta applies to current_page_obj and potentially its chain. - # We can call _update_linked_pages_meta_info if temp_last_page_in_batch is in mid-term already. - if temp_last_page_in_batch.get("page_id") and self.mid_term_memory.get_page_by_id(temp_last_page_in_batch["page_id"]): - self._update_linked_pages_meta_info(temp_last_page_in_batch["page_id"], new_meta) - else: - # Start of a new chain or no previous page - current_page_obj["meta_info"] = generate_page_meta_info(None, current_page_obj, self.client, model=self.llm_model) - - current_batch_pages.append(current_page_obj) - temp_last_page_in_batch = current_page_obj # Update for the next iteration in this batch - - # Update the global last evicted page for the next run of this method - if current_batch_pages: - self.last_evicted_page_for_continuity = current_batch_pages[-1] - - # 2. Consolidate text from current_batch_pages for multi-summary - if not current_batch_pages: - return - - input_text_for_summary = "\n".join([ - f"User: {p.get('user_input','')}\nAssistant: {p.get('agent_response','')}" - for p in current_batch_pages - ]) - - print("Updater: Generating multi-topic summary for the evicted batch...") - multi_summary_result = gpt_generate_multi_summary(input_text_for_summary, self.client, model=self.llm_model) - - # 3. 
Insert pages into MidTermMemory based on summaries - if multi_summary_result and multi_summary_result.get("summaries"): - for summary_item in multi_summary_result["summaries"]: - theme_summary = summary_item.get("content", "General summary of recent interactions.") - theme_keywords = summary_item.get("keywords", []) - print(f"Updater: Processing theme '{summary_item.get('theme')}' for mid-term insertion.") - - # Pass the already processed pages (with IDs, embeddings to be added by MidTermMemory if not present) - self.mid_term_memory.insert_pages_into_session( - summary_for_new_pages=theme_summary, - keywords_for_new_pages=theme_keywords, - pages_to_insert=current_batch_pages, # These pages now have pre_page, next_page, meta_info set up - similarity_threshold=self.topic_similarity_threshold - ) - else: - # Fallback: if no summaries, add as one session or handle as a single block - print("Updater: No specific themes from multi-summary. Adding batch as a general session.") - fallback_summary = "General conversation segment from short-term memory." - fallback_keywords = llm_extract_keywords(input_text_for_summary, self.client, model=self.llm_model) if input_text_for_summary else [] - self.mid_term_memory.insert_pages_into_session( - summary_for_new_pages=fallback_summary, - keywords_for_new_pages=list(fallback_keywords), - pages_to_insert=current_batch_pages, - similarity_threshold=self.topic_similarity_threshold - ) - - # After pages are in mid-term, ensure their connections are doubly linked if needed. - # MidTermMemory.insert_pages_into_session should ideally handle this internally - # or we might need a separate pass to solidify connections after all insertions. - for page in current_batch_pages: - if page.get("pre_page"): - self.mid_term_memory.update_page_connections(page["pre_page"], page["page_id"]) - if page.get("next_page"): - self.mid_term_memory.update_page_connections(page["page_id"], page["next_page"]) # This seems redundant if next is set by prior - if current_batch_pages: # Save if any pages were processed - self.mid_term_memory.save() - - def update_long_term_from_analysis(self, user_id, profile_analysis_result): - """ - Updates long-term memory based on the results of a personality/knowledge analysis. - profile_analysis_result is expected to be a dict with keys like "profile", "private", "assistant_knowledge". 
-        """
-        if not profile_analysis_result:
-            print("Updater: No analysis result provided for long-term update.")
-            return
-
-        new_profile_text = profile_analysis_result.get("profile")
-        if new_profile_text and new_profile_text.lower() != "none":
-            print(f"Updater: Updating user profile for {user_id} in LongTermMemory.")
-            # Use the new analysis result as the complete profile, since it should already be the integrated result
-            self.long_term_memory.update_user_profile(user_id, new_profile_text, merge=False)
-
-        user_private_knowledge = profile_analysis_result.get("private")
-        if user_private_knowledge and user_private_knowledge.lower() != "none":
-            print(f"Updater: Adding user private knowledge for {user_id} to LongTermMemory.")
-            # Split if multiple lines, assuming each line is a distinct piece of knowledge
-            for line in user_private_knowledge.split('\n'):
-                if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
-                    self.long_term_memory.add_user_knowledge(line.strip())
-
-        assistant_knowledge_text = profile_analysis_result.get("assistant_knowledge")
-        if assistant_knowledge_text and assistant_knowledge_text.lower() != "none":
-            print("Updater: Adding assistant knowledge to LongTermMemory.")
-            for line in assistant_knowledge_text.split('\n'):
-                if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
-                    self.long_term_memory.add_assistant_knowledge(line.strip())
-
-        # LongTermMemory.save() is called by its add/update methods
\ No newline at end of file
diff --git a/memoryos-pypi/utils.py b/memoryos-pypi/utils.py
deleted file mode 100644
index 097a294..0000000
--- a/memoryos-pypi/utils.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import time
-import uuid
-import openai
-import numpy as np
-from sentence_transformers import SentenceTransformer
-import json
-import os
-try:
-    from . import prompts # Try a relative import first
-except ImportError:
-    import prompts # Fall back to an absolute import
-from openai import OpenAI
-from concurrent.futures import ThreadPoolExecutor, as_completed
-import threading
-
-def clean_reasoning_model_output(text):
-    """
-    Strip <think>...</think> tags from reasoning-model output.
-    Adapts the output format of reasoning models (e.g. the o1 series).
-    """
-    if not text:
-        return text
-
-    import re
-    # Remove <think>...</think> tags and their content
-    cleaned_text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
-    # Collapse any excess blank lines this may have produced
-    cleaned_text = re.sub(r'\n\s*\n\s*\n', '\n\n', cleaned_text)
-    # Trim leading and trailing whitespace
-    cleaned_text = cleaned_text.strip()
-
-    return cleaned_text
-
-# ---- OpenAI Client ----
-class OpenAIClient:
-    def __init__(self, api_key, base_url=None, max_workers=5):
-        self.api_key = api_key
-        self.base_url = base_url if base_url else "https://api.openai.com/v1"
-        # The openai library looks for the OPENAI_API_KEY and OPENAI_BASE_URL env vars by default,
-        # or they can be passed directly to the client.
-        # For simplicity and explicit control, we pass them to the client constructor.
-        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
-        self.executor = ThreadPoolExecutor(max_workers=max_workers)
-        self._lock = threading.Lock()
-
-    def chat_completion(self, model, messages, temperature=0.7, max_tokens=2000):
-        print(f"Calling OpenAI API. Model: {model}")
-        try:
-            response = self.client.chat.completions.create(
-                model=model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens
-            )
-            raw_content = response.choices[0].message.content.strip()
-            # Automatically strip reasoning-model <think> tags
-            cleaned_content = clean_reasoning_model_output(raw_content)
-            return cleaned_content
-        except Exception as e:
-            print(f"Error calling OpenAI API: {e}")
-            # Fallback or error handling
-            return "Error: Could not get response from LLM."
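# Illustrative check for clean_reasoning_model_output() above (strings invented):
#     raw = "<think>chain of thought...</think>\nParis is the capital."
#     clean_reasoning_model_output(raw)  ->  "Paris is the capital."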
-
-    def chat_completion_async(self, model, messages, temperature=0.7, max_tokens=2000):
-        """Asynchronous version of chat_completion."""
-        return self.executor.submit(self.chat_completion, model, messages, temperature, max_tokens)
-
-    def batch_chat_completion(self, requests):
-        """
-        Process multiple LLM requests in parallel.
-        requests: List of dict with keys: model, messages, temperature, max_tokens
-        """
-        futures = []
-        for req in requests:
-            future = self.chat_completion_async(
-                model=req.get("model", os.environ.get("llm_model")),
-                messages=req["messages"],
-                temperature=req.get("temperature", 0.7),
-                max_tokens=req.get("max_tokens", 2000)
-            )
-            futures.append(future)
-
-        results = []
-        for future in as_completed(futures):
-            try:
-                result = future.result()
-                results.append(result)
-            except Exception as e:
-                print(f"Error in batch completion: {e}")
-                results.append("Error: Could not get response from LLM.")
-
-        return results
-
-    def shutdown(self):
-        """Shut down the thread pool."""
-        self.executor.shutdown(wait=True)
-
-# ---- Parallel Processing Utilities ----
-def run_parallel_tasks(tasks, max_workers=3):
-    """
-    Execute a list of tasks in parallel.
-    tasks: List of callable functions
-    """
-    with ThreadPoolExecutor(max_workers=max_workers) as executor:
-        futures = [executor.submit(task) for task in tasks]
-        results = []
-        for future in as_completed(futures):
-            try:
-                result = future.result()
-                results.append(result)
-            except Exception as e:
-                print(f"Error in parallel task: {e}")
-                results.append(None)
-    return results
-
-# ---- Basic Utilities ----
-def get_timestamp():
-    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-
-def generate_id(prefix="id"):
-    return f"{prefix}_{uuid.uuid4().hex[:8]}"
-
-def ensure_directory_exists(path):
-    os.makedirs(os.path.dirname(path), exist_ok=True)
-
-# ---- Embedding Utilities ----
-_model_cache = {}
-_embedding_cache = {}  # Cache computed embeddings
-
-def get_embedding(text, model_name="all-MiniLM-L6-v2", use_cache=True):
-    # Build the cache key
-    if use_cache:
-        cache_key = f"{model_name}::{hash(text)}"
-        if cache_key in _embedding_cache:
-            print(f"Using cached embedding for text: {text[:30]}...")
-            return _embedding_cache[cache_key]
-
-    if model_name not in _model_cache:
-        print(f"Loading sentence transformer model: {model_name}")
-        _model_cache[model_name] = SentenceTransformer(model_name)
-    model = _model_cache[model_name]
-    embedding = model.encode([text], convert_to_numpy=True)[0]
-
-    # Cache the result
-    if use_cache:
-        _embedding_cache[cache_key] = embedding
-        # Bound the cache size to avoid a memory leak
-        if len(_embedding_cache) > 10000:  # Cache at most 10000 embeddings
-            # Drop a batch of older cache entries
-            keys_to_remove = list(_embedding_cache.keys())[:1000]
-            for key in keys_to_remove:
-                del _embedding_cache[key]
-            print("Cleaned embedding cache to prevent memory overflow")
-
-    return embedding
-
-def clear_embedding_cache():
-    """Clear the embedding cache."""
-    global _embedding_cache
-    _embedding_cache.clear()
-    print("Embedding cache cleared")
-
-def normalize_vector(vec):
-    vec = np.array(vec, dtype=np.float32)
-    norm = np.linalg.norm(vec)
-    if norm == 0:
-        return vec
-    return vec / norm
-
-# ---- Time Decay Function ----
-def compute_time_decay(event_timestamp_str, current_timestamp_str, tau_hours=24):
-    from datetime import datetime
-    fmt = "%Y-%m-%d %H:%M:%S"
-    try:
-        t_event = datetime.strptime(event_timestamp_str, fmt)
-        t_current = datetime.strptime(current_timestamp_str, fmt)
-        delta_hours = (t_current - t_event).total_seconds() / 3600.0
-        return np.exp(-delta_hours / tau_hours)
-    except ValueError:  # Handle cases where timestamp might be invalid
-        return 0.1  # Default low recency
-
-
-# ---- LLM-based Utility Functions ----
-
-def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
-    model=os.environ.get("llm_model") or model
-    dialog_text = "\n".join([f"User: {d.get('user_input','')} Assistant: {d.get('agent_response','')}" for d in dialogs])
-    messages = [
-        {"role": "system", "content": prompts.SUMMARIZE_DIALOGS_SYSTEM_PROMPT},
-        {"role": "user", "content": prompts.SUMMARIZE_DIALOGS_USER_PROMPT.format(dialog_text=dialog_text)}
-    ]
-    print("Calling LLM to generate topic summary...")
-    return client.chat_completion(model=model, messages=messages)
-
-def gpt_generate_multi_summary(text, client: OpenAIClient, model="gpt-4o-mini"):
-    model=os.environ.get("llm_model") or model
-    messages = [
-        {"role": "system", "content": prompts.MULTI_SUMMARY_SYSTEM_PROMPT},
-        {"role": "user", "content": prompts.MULTI_SUMMARY_USER_PROMPT.format(text=text)}
-    ]
-    print("Calling LLM to generate multi-topic summary...")
-    response_text = client.chat_completion(model=model, messages=messages)
-    try:
-        summaries = json.loads(response_text)
-    except json.JSONDecodeError:
-        print(f"Warning: Could not parse multi-summary JSON: {response_text}")
-        summaries = []  # Return empty list or a default structure
-    return {"input": text, "summaries": summaries}
-
-
-def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", existing_user_profile="None"):
-    """
-    Analyze and update the user personality profile from dialogs.
-    Combines the existing profile with the new dialogs and directly outputs the updated, complete profile.
-    """
-    model=os.environ.get("llm_model") or model
-    conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
-    messages = [
-        {"role": "system", "content": prompts.PERSONALITY_ANALYSIS_SYSTEM_PROMPT},
-        {"role": "user", "content": prompts.PERSONALITY_ANALYSIS_USER_PROMPT.format(
-            conversation=conversation,
-            existing_user_profile=existing_user_profile
-        )}
-    ]
-    print("Calling LLM for user profile analysis and update...")
-    result_text = client.chat_completion(model=model, messages=messages)
-    return result_text.strip() if result_text else "None"
-
-
-def gpt_knowledge_extraction(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
-    """Extract user private data and assistant knowledge from dialogs"""
-    model=os.environ.get("llm_model") or model
-    conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
-    messages = [
-        {"role": "system", "content": prompts.KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT},
-        {"role": "user", "content": prompts.KNOWLEDGE_EXTRACTION_USER_PROMPT.format(
-            conversation=conversation
-        )}
-    ]
-    print("Calling LLM for knowledge extraction...")
-    result_text = client.chat_completion(model=model, messages=messages)
-
-    private_data = "None"
-    assistant_knowledge = "None"
-
-    try:
-        if "【User Private Data】" in result_text:
-            private_data_start = result_text.find("【User Private Data】") + len("【User Private Data】")
-            if "【Assistant Knowledge】" in result_text:
-                private_data_end = result_text.find("【Assistant Knowledge】")
-                private_data = result_text[private_data_start:private_data_end].strip()
-
-                assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】")
-                assistant_knowledge = result_text[assistant_knowledge_start:].strip()
-            else:
-                private_data = result_text[private_data_start:].strip()
-        elif "【Assistant 
Knowledge】" in result_text: - assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】") - assistant_knowledge = result_text[assistant_knowledge_start:].strip() - - except Exception as e: - print(f"Error parsing knowledge extraction: {e}. Raw result: {result_text}") - - return { - "private": private_data if private_data else "None", - "assistant_knowledge": assistant_knowledge if assistant_knowledge else "None" - } - - -# Keep the old function for backward compatibility, but mark as deprecated -def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", known_user_traits="None"): - """ - DEPRECATED: Use gpt_user_profile_analysis and gpt_knowledge_extraction instead. - This function is kept for backward compatibility only. - """ - # Call the new functions - model=os.environ.get("llm_model") or model - profile = gpt_user_profile_analysis(dialogs, client, model, known_user_traits) - knowledge_data = gpt_knowledge_extraction(dialogs, client, model) - - return { - "profile": profile, - "private": knowledge_data["private"], - "assistant_knowledge": knowledge_data["assistant_knowledge"] - } - - -def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="gpt-4o-mini"): - model=os.environ.get("llm_model") or model - messages = [ - {"role": "system", "content": prompts.UPDATE_PROFILE_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.UPDATE_PROFILE_USER_PROMPT.format(old_profile=old_profile, new_analysis=new_analysis)} - ] - print("Calling LLM to update user profile...") - return client.chat_completion(model=model, messages=messages) - -def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"): - model=os.environ.get("llm_model") or model - - messages = [ - {"role": "system", "content": prompts.EXTRACT_THEME_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.EXTRACT_THEME_USER_PROMPT.format(answer_text=answer_text)} - ] - print("Calling LLM to extract theme...") - return client.chat_completion(model=model, messages=messages) - -def llm_extract_keywords(text, client: OpenAIClient, model="gpt-4o-mini"): - - model=os.environ.get("llm_model") or model - - messages = [ - {"role": "system", "content": prompts.EXTRACT_KEYWORDS_SYSTEM_PROMPT}, - {"role": "user", "content": prompts.EXTRACT_KEYWORDS_USER_PROMPT.format(text=text)} - ] - print("Calling LLM to extract keywords...") - response = client.chat_completion(model=model, messages=messages) - return [kw.strip() for kw in response.split(',') if kw.strip()] - -# ---- Functions from dynamic_update.py (to be used by Updater class) ---- -def check_conversation_continuity(previous_page, current_page, client: OpenAIClient, model="gpt-4o-mini"): - prev_user = previous_page.get("user_input", "") if previous_page else "" - prev_agent = previous_page.get("agent_response", "") if previous_page else "" - model=os.environ.get("llm_model") or model - - user_prompt = prompts.CONTINUITY_CHECK_USER_PROMPT.format( - prev_user=prev_user, - prev_agent=prev_agent, - curr_user=current_page.get("user_input", ""), - curr_agent=current_page.get("agent_response", "") - ) - messages = [ - {"role": "system", "content": prompts.CONTINUITY_CHECK_SYSTEM_PROMPT}, - {"role": "user", "content": user_prompt} - ] - response = client.chat_completion(model=model, messages=messages, temperature=0.0, max_tokens=10) - return response.strip().lower() == "true" - -def generate_page_meta_info(last_page_meta, current_page, client: OpenAIClient, model="gpt-4o-mini"): - 
model=os.environ.get("llm_model") or model
-    current_conversation = f"User: {current_page.get('user_input', '')}\nAssistant: {current_page.get('agent_response', '')}"
-    user_prompt = prompts.META_INFO_USER_PROMPT.format(
-        last_meta=last_page_meta if last_page_meta else "None",
-        new_dialogue=current_conversation
-    )
-    messages = [
-        {"role": "system", "content": prompts.META_INFO_SYSTEM_PROMPT},
-        {"role": "user", "content": user_prompt}
-    ]
-    return client.chat_completion(model=model, messages=messages, temperature=0.3, max_tokens=100).strip()
\ No newline at end of file

From 8f1122f02fee01137952339583ddb46a2d039991 Mon Sep 17 00:00:00 2001
From: Kang Jiazheng <108711748+kkkjz@users.noreply.github.com>
Date: Sun, 13 Jul 2025 15:45:56 +0800
Subject: [PATCH 3/4] Add files via upload

---
 memoryos-mcp/config.json               |  15 +
 memoryos-mcp/mcp.json                  |  40 +++
 memoryos-mcp/memoryos/__init__.py      |   3 +
 memoryos-mcp/memoryos/long_term.py     | 170 +++++++++++
 memoryos-mcp/memoryos/memoryos.py      | 362 +++++++++++++++++++++++
 memoryos-mcp/memoryos/mid_term.py      | 391 ++++++++++++++++++++++++
 memoryos-mcp/memoryos/prompts.py       | 235 +++++++++++++++
 memoryos-mcp/memoryos/requirements.txt |  23 ++
 memoryos-mcp/memoryos/retriever.py     | 131 +++++++++
 memoryos-mcp/memoryos/short_term.py    |  64 ++++
 memoryos-mcp/memoryos/test.py          |  55 ++++
 memoryos-mcp/memoryos/updater.py       | 239 +++++++++++++++
 memoryos-mcp/memoryos/utils.py         | 393 +++++++++++++++++++++++++
 memoryos-mcp/requirements.txt          |  19 ++
 memoryos-mcp/server_new.py             | 292 ++++++++++++++++++
 memoryos-mcp/test_simple.py            | 268 +++++++++++++++++
 16 files changed, 2700 insertions(+)
 create mode 100644 memoryos-mcp/config.json
 create mode 100644 memoryos-mcp/mcp.json
 create mode 100644 memoryos-mcp/memoryos/__init__.py
 create mode 100644 memoryos-mcp/memoryos/long_term.py
 create mode 100644 memoryos-mcp/memoryos/memoryos.py
 create mode 100644 memoryos-mcp/memoryos/mid_term.py
 create mode 100644 memoryos-mcp/memoryos/prompts.py
 create mode 100644 memoryos-mcp/memoryos/requirements.txt
 create mode 100644 memoryos-mcp/memoryos/retriever.py
 create mode 100644 memoryos-mcp/memoryos/short_term.py
 create mode 100644 memoryos-mcp/memoryos/test.py
 create mode 100644 memoryos-mcp/memoryos/updater.py
 create mode 100644 memoryos-mcp/memoryos/utils.py
 create mode 100644 memoryos-mcp/requirements.txt
 create mode 100644 memoryos-mcp/server_new.py
 create mode 100644 memoryos-mcp/test_simple.py

diff --git a/memoryos-mcp/config.json b/memoryos-mcp/config.json
new file mode 100644
index 0000000..fb4869e
--- /dev/null
+++ b/memoryos-mcp/config.json
@@ -0,0 +1,15 @@
+{
+  "user_id": "test_user_001",
+  "openai_api_key": "",
+  "openai_base_url": "",
+  "data_storage_path": "./memoryos_data",
+  "assistant_id": "memoryos_assistant",
+  "short_term_capacity": 2,
+  "mid_term_capacity": 2000,
+  "embedding_model_name": "BAAI/bge-m3",
+  "long_term_knowledge_capacity": 100,
+  "retrieval_queue_capacity": 7,
+  "mid_term_heat_threshold": 7.0,
+  "mid_term_similarity_threshold": 0.6,
+  "llm_model": "gpt-4o-mini"
+}
\ No newline at end of file
diff --git a/memoryos-mcp/mcp.json b/memoryos-mcp/mcp.json
new file mode 100644
index 0000000..8677e30
--- /dev/null
+++ b/memoryos-mcp/mcp.json
@@ -0,0 +1,40 @@
+{
+  "mcpServers": {
+    "memoryos": {
+      "command": "/root/miniconda3/envs/memos/bin/python",
+      "args": [
+        "/root/autodl-tmp/memoryos-mcp/server_new.py",
+        "--config",
+        "/root/autodl-tmp/memoryos-mcp/config.json"
+      ],
+      "env": {},
+      "description": "MemoryOS MCP Server - intelligent memory system providing memory storage, retrieval, and user-profile capabilities",
+      "capabilities": {
"tools": [ + { + "name": "add_memory", + "description": "Add new memory to the MemoryOS system. (user_input and assistant_response pair)" + }, + { + "name": "retrieve_memory", + "description": "Retrieve related memories and context information from MemoryOS based on the query" + }, + { + "name": "get_user_profile", + "description": "Get user profile information, including personality traits, preferences, and related knowledge" + } + ], + "resources": [ + { + "uri": "memoryos://status", + "name": "MemoryOS系统状态" + }, + { + "uri": "memoryos://config", + "name": "MemoryOS配置信息" + } + ] + } + } + } +} \ No newline at end of file diff --git a/memoryos-mcp/memoryos/__init__.py b/memoryos-mcp/memoryos/__init__.py new file mode 100644 index 0000000..b97e620 --- /dev/null +++ b/memoryos-mcp/memoryos/__init__.py @@ -0,0 +1,3 @@ +from .memoryos import Memoryos + +__all__ = ['Memoryos'] \ No newline at end of file diff --git a/memoryos-mcp/memoryos/long_term.py b/memoryos-mcp/memoryos/long_term.py new file mode 100644 index 0000000..4e49b36 --- /dev/null +++ b/memoryos-mcp/memoryos/long_term.py @@ -0,0 +1,170 @@ +import json +import numpy as np +import faiss +from collections import deque +try: + from .utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists +except ImportError: + from utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists + +class LongTermMemory: + def __init__(self, file_path, knowledge_capacity=100, embedding_model_name: str = "all-MiniLM-L6-v2", embedding_model_kwargs: dict = None): + self.file_path = file_path + ensure_directory_exists(self.file_path) + self.knowledge_capacity = knowledge_capacity + self.user_profiles = {} # {user_id: {data: "profile_string", "last_updated": "timestamp"}} + # Use deques for knowledge bases to easily manage capacity + self.knowledge_base = deque(maxlen=self.knowledge_capacity) # For general/user private knowledge + self.assistant_knowledge = deque(maxlen=self.knowledge_capacity) # For assistant specific knowledge + + self.embedding_model_name = embedding_model_name + self.embedding_model_kwargs = embedding_model_kwargs if embedding_model_kwargs is not None else {} + self.load() + + def update_user_profile(self, user_id, new_data, merge=True): + if merge and user_id in self.user_profiles and self.user_profiles[user_id].get("data"): # Check if data exists + current_data = self.user_profiles[user_id]["data"] + if isinstance(current_data, str) and isinstance(new_data, str): + updated_data = f"{current_data}\n\n--- Updated on {get_timestamp()} ---\n{new_data}" + else: # Fallback to overwrite if types are not strings or for more complex merge + updated_data = new_data + else: + # If merge=False or no existing data, replace with new data + updated_data = new_data + + self.user_profiles[user_id] = { + "data": updated_data, + "last_updated": get_timestamp() + } + print(f"LongTermMemory: Updated user profile for {user_id} (merge={merge}).") + self.save() + + def get_raw_user_profile(self, user_id): + return self.user_profiles.get(user_id, {}).get("data", "None") # Return "None" string if not found + + def get_user_profile_data(self, user_id): + return self.user_profiles.get(user_id, {}) + + def add_knowledge_entry(self, knowledge_text, knowledge_deque: deque, type_name="knowledge"): + if not knowledge_text or knowledge_text.strip().lower() in ["", "none", "- none", "- none."]: + print(f"LongTermMemory: Empty {type_name} received, not saving.") + return + + # If deque is full, the oldest item is 
automatically removed when appending. + vec = get_embedding( + knowledge_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + vec = normalize_vector(vec).tolist() + entry = { + "knowledge": knowledge_text, + "timestamp": get_timestamp(), + "knowledge_embedding": vec + } + knowledge_deque.append(entry) + print(f"LongTermMemory: Added {type_name}. Current count: {len(knowledge_deque)}.") + self.save() + + def add_user_knowledge(self, knowledge_text): + self.add_knowledge_entry(knowledge_text, self.knowledge_base, "user knowledge") + + def add_assistant_knowledge(self, knowledge_text): + self.add_knowledge_entry(knowledge_text, self.assistant_knowledge, "assistant knowledge") + + def get_user_knowledge(self): + return list(self.knowledge_base) + + def get_assistant_knowledge(self): + return list(self.assistant_knowledge) + + def _search_knowledge_deque(self, query, knowledge_deque: deque, threshold=0.1, top_k=5): + if not knowledge_deque: + return [] + + query_vec = get_embedding( + query, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + query_vec = normalize_vector(query_vec) + + embeddings = [] + valid_entries = [] + for entry in knowledge_deque: + if "knowledge_embedding" in entry and entry["knowledge_embedding"]: + embeddings.append(np.array(entry["knowledge_embedding"], dtype=np.float32)) + valid_entries.append(entry) + else: + print(f"Warning: Entry without embedding found in knowledge_deque: {entry.get('knowledge','N/A')[:50]}") + + if not embeddings: + return [] + + embeddings_np = np.array(embeddings, dtype=np.float32) + if embeddings_np.ndim == 1: # Single item case + if embeddings_np.shape[0] == 0: return [] # Empty embeddings + embeddings_np = embeddings_np.reshape(1, -1) + + if embeddings_np.shape[0] == 0: # No valid embeddings + return [] + + dim = embeddings_np.shape[1] + index = faiss.IndexFlatIP(dim) # Using Inner Product for similarity + index.add(embeddings_np) + + query_arr = np.array([query_vec], dtype=np.float32) + distances, indices = index.search(query_arr, min(top_k, len(valid_entries))) # Search at most k or length of valid_entries + + results = [] + for i, idx in enumerate(indices[0]): + if idx != -1: # faiss returns -1 for no valid index + similarity_score = float(distances[0][i]) # For IndexFlatIP, distance is the dot product (similarity) + if similarity_score >= threshold: + results.append(valid_entries[idx]) # Add the original entry dict + + # Sort by similarity score descending before returning, as faiss might not guarantee order for IP + results.sort(key=lambda x: float(np.dot(np.array(x["knowledge_embedding"], dtype=np.float32), query_vec)), reverse=True) + return results + + def search_user_knowledge(self, query, threshold=0.1, top_k=5): + results = self._search_knowledge_deque(query, self.knowledge_base, threshold, top_k) + print(f"LongTermMemory: Searched user knowledge for '{query[:30]}...'. Found {len(results)} matches.") + return results + + def search_assistant_knowledge(self, query, threshold=0.1, top_k=5): + results = self._search_knowledge_deque(query, self.assistant_knowledge, threshold, top_k) + print(f"LongTermMemory: Searched assistant knowledge for '{query[:30]}...'. 
Found {len(results)} matches.")
+        return results
+
+    def save(self):
+        data = {
+            "user_profiles": self.user_profiles,
+            "knowledge_base": list(self.knowledge_base), # Convert deques to lists for JSON serialization
+            "assistant_knowledge": list(self.assistant_knowledge)
+        }
+        try:
+            with open(self.file_path, "w", encoding="utf-8") as f:
+                json.dump(data, f, ensure_ascii=False, indent=2)
+        except IOError as e:
+            print(f"Error saving LongTermMemory to {self.file_path}: {e}")
+
+    def load(self):
+        try:
+            with open(self.file_path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+            self.user_profiles = data.get("user_profiles", {})
+            # Load into deques, respecting maxlen
+            kb_data = data.get("knowledge_base", [])
+            self.knowledge_base = deque(kb_data, maxlen=self.knowledge_capacity)
+
+            ak_data = data.get("assistant_knowledge", [])
+            self.assistant_knowledge = deque(ak_data, maxlen=self.knowledge_capacity)
+
+            print(f"LongTermMemory: Loaded from {self.file_path}.")
+        except FileNotFoundError:
+            print(f"LongTermMemory: No history file found at {self.file_path}. Initializing new memory.")
+        except json.JSONDecodeError:
+            print(f"LongTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.")
+        except Exception as e:
+            print(f"LongTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.")
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/memoryos.py b/memoryos-mcp/memoryos/memoryos.py
new file mode 100644
index 0000000..ae7da09
--- /dev/null
+++ b/memoryos-mcp/memoryos/memoryos.py
@@ -0,0 +1,362 @@
+import os
+import json
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+# Support both package-relative and absolute imports
+try:
+    # Try relative imports (when used as a package)
+    from .utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists
+    from . import prompts
+    from .short_term import ShortTermMemory
+    from .mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic
+    from .long_term import LongTermMemory
+    from .updater import Updater
+    from .retriever import Retriever
+except ImportError:
+    # Fall back to absolute imports (when used as a standalone module)
+    from utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists
+    import prompts
+    from short_term import ShortTermMemory
+    from mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic
+    from long_term import LongTermMemory
+    from updater import Updater
+    from retriever import Retriever
+
+# Heat threshold for triggering profile/knowledge update from mid-term memory
+H_PROFILE_UPDATE_THRESHOLD = 5.0
+DEFAULT_ASSISTANT_ID = "default_assistant_profile"
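+
+# A minimal usage sketch (hypothetical values, mirroring the fields shipped in
+# memoryos-mcp/config.json above) -- kept as a comment so it is not executed:
+#
+#   from memoryos import Memoryos
+#   mos = Memoryos(
+#       user_id="test_user_001",
+#       openai_api_key="sk-...",                # placeholder key
+#       data_storage_path="./memoryos_data",
+#       assistant_id="memoryos_assistant",
+#       mid_term_heat_threshold=7.0,
+#       embedding_model_name="BAAI/bge-m3",     # triggers the use_fp16 smart default below
+#       llm_model="gpt-4o-mini",
+#   )
+#   mos.add_memory("I love hiking.", "Great! I'll remember that.")
+#   print(mos.get_response("What outdoor hobby do I have?"))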
+
+class Memoryos:
+    def __init__(self, user_id: str,
+                 openai_api_key: str,
+                 data_storage_path: str,
+                 openai_base_url: str = None,
+                 assistant_id: str = DEFAULT_ASSISTANT_ID,
+                 short_term_capacity=10,
+                 mid_term_capacity=2000,
+                 long_term_knowledge_capacity=100,
+                 retrieval_queue_capacity=7,
+                 mid_term_heat_threshold=H_PROFILE_UPDATE_THRESHOLD,
+                 mid_term_similarity_threshold=0.6,
+                 llm_model="gpt-4o-mini",
+                 embedding_model_name: str = "all-MiniLM-L6-v2",
+                 embedding_model_kwargs: dict = None
+                 ):
+        self.user_id = user_id
+        self.assistant_id = assistant_id
+        self.data_storage_path = os.path.abspath(data_storage_path)
+        self.llm_model = llm_model
+        self.mid_term_similarity_threshold = mid_term_similarity_threshold
+        self.embedding_model_name = embedding_model_name
+
+        # Smart defaults for embedding_model_kwargs
+        if embedding_model_kwargs is None:
+            if 'bge-m3' in self.embedding_model_name.lower():
+                print("INFO: Detected bge-m3 model, defaulting embedding_model_kwargs to {'use_fp16': True}")
+                self.embedding_model_kwargs = {'use_fp16': True}
+            else:
+                self.embedding_model_kwargs = {}
+        else:
+            self.embedding_model_kwargs = embedding_model_kwargs
+
+        print(f"Initializing Memoryos for user '{self.user_id}' and assistant '{self.assistant_id}'. Data path: {self.data_storage_path}")
+        print(f"Using unified LLM model: {self.llm_model}")
+        print(f"Using embedding model: {self.embedding_model_name} with kwargs: {self.embedding_model_kwargs}")
+
+        # Initialize OpenAI Client
+        self.client = OpenAIClient(api_key=openai_api_key, base_url=openai_base_url)
+
+        # Define file paths for user-specific data
+        self.user_data_dir = os.path.join(self.data_storage_path, "users", self.user_id)
+        user_short_term_path = os.path.join(self.user_data_dir, "short_term.json")
+        user_mid_term_path = os.path.join(self.user_data_dir, "mid_term.json")
+        user_long_term_path = os.path.join(self.user_data_dir, "long_term_user.json") # User profile and their knowledge
+
+        # Define file paths for assistant-specific data (knowledge)
+        self.assistant_data_dir = os.path.join(self.data_storage_path, "assistants", self.assistant_id)
+        assistant_long_term_path = os.path.join(self.assistant_data_dir, "long_term_assistant.json")
+
+        # Ensure directories exist
+        ensure_directory_exists(user_short_term_path) # ensure_directory_exists operates on the file path, creating parent dirs
+        ensure_directory_exists(user_mid_term_path)
+        ensure_directory_exists(user_long_term_path)
+        ensure_directory_exists(assistant_long_term_path)
+
+        # Initialize Memory Modules for User
+        self.short_term_memory = ShortTermMemory(file_path=user_short_term_path, max_capacity=short_term_capacity)
+        self.mid_term_memory = MidTermMemory(
+            file_path=user_mid_term_path,
+            client=self.client,
+            max_capacity=mid_term_capacity,
+            embedding_model_name=self.embedding_model_name,
+            embedding_model_kwargs=self.embedding_model_kwargs
+        )
+        self.user_long_term_memory = LongTermMemory(
+            file_path=user_long_term_path,
+            knowledge_capacity=long_term_knowledge_capacity,
+            embedding_model_name=self.embedding_model_name,
+            embedding_model_kwargs=self.embedding_model_kwargs
+        )
+
+        # Initialize Memory Module for Assistant Knowledge
+        self.assistant_long_term_memory = LongTermMemory(
+            file_path=assistant_long_term_path,
+            knowledge_capacity=long_term_knowledge_capacity,
+            embedding_model_name=self.embedding_model_name,
+            embedding_model_kwargs=self.embedding_model_kwargs
+        )
+
+        # Initialize Orchestration Modules
+        self.updater = Updater(short_term_memory=self.short_term_memory,
+                               mid_term_memory=self.mid_term_memory,
+                               long_term_memory=self.user_long_term_memory, # Updater primarily updates user's LTM profile/knowledge
+                               client=self.client,
+                               topic_similarity_threshold=mid_term_similarity_threshold, # Pass the mid-term memory similarity threshold through
+                               llm_model=self.llm_model)
+        self.retriever = Retriever(
+            mid_term_memory=self.mid_term_memory,
+            long_term_memory=self.user_long_term_memory,
+            assistant_long_term_memory=self.assistant_long_term_memory, # Pass assistant LTM
+            queue_capacity=retrieval_queue_capacity
+        )
+
+        self.mid_term_heat_threshold = mid_term_heat_threshold
+
+    def _trigger_profile_and_knowledge_update_if_needed(self):
+        """
+        Checks mid-term memory for hot segments and triggers a profile/knowledge update if the threshold is met.
+        Adapted from main_memoybank.py's update_user_profile_from_top_segment.
+        Enhanced with parallel LLM processing for better performance.
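+        A segment's heat is H_segment = alpha*N_visit + beta*L_interaction + gamma*R_recency
+        (see mid_term.compute_segment_heat); the update fires once the hottest segment
+        reaches self.mid_term_heat_threshold.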
+ """ + if not self.mid_term_memory.heap: + return + + # Peek at the top of the heap (hottest segment) + # MidTermMemory heap stores (-H_segment, sid) + neg_heat, sid = self.mid_term_memory.heap[0] + current_heat = -neg_heat + + if current_heat >= self.mid_term_heat_threshold: + session = self.mid_term_memory.sessions.get(sid) + if not session: + self.mid_term_memory.rebuild_heap() # Clean up if session is gone + return + + # Get unanalyzed pages from this hot session + # A page is a dict: {"user_input": ..., "agent_response": ..., "timestamp": ..., "analyzed": False, ...} + unanalyzed_pages = [p for p in session.get("details", []) if not p.get("analyzed", False)] + + if unanalyzed_pages: + print(f"Memoryos: Mid-term session {sid} heat ({current_heat:.2f}) exceeded threshold. Analyzing {len(unanalyzed_pages)} pages for profile/knowledge update.") + + # 并行执行两个LLM任务:用户画像分析(已包含更新)、知识提取 + def task_user_profile_analysis(): + print("Memoryos: Starting parallel user profile analysis and update...") + # 获取现有用户画像 + existing_profile = self.user_long_term_memory.get_raw_user_profile(self.user_id) + if not existing_profile or existing_profile.lower() == "none": + existing_profile = "No existing profile data." + + # 直接输出更新后的完整画像 + return gpt_user_profile_analysis(unanalyzed_pages, self.client, model=self.llm_model, existing_user_profile=existing_profile) + + def task_knowledge_extraction(): + print("Memoryos: Starting parallel knowledge extraction...") + return gpt_knowledge_extraction(unanalyzed_pages, self.client, model=self.llm_model) + + # 使用并行任务执行 + with ThreadPoolExecutor(max_workers=2) as executor: + # 提交两个主要任务 + future_profile = executor.submit(task_user_profile_analysis) + future_knowledge = executor.submit(task_knowledge_extraction) + + # 等待结果 + try: + updated_user_profile = future_profile.result() # 直接是更新后的完整画像 + knowledge_result = future_knowledge.result() + except Exception as e: + print(f"Error in parallel LLM processing: {e}") + return + + new_user_private_knowledge = knowledge_result.get("private") + new_assistant_knowledge = knowledge_result.get("assistant_knowledge") + + # 直接使用更新后的完整用户画像 + if updated_user_profile and updated_user_profile.lower() != "none": + print("Memoryos: Updating user profile with integrated analysis...") + self.user_long_term_memory.update_user_profile(self.user_id, updated_user_profile, merge=False) # 直接替换为新的完整画像 + + # Add User Private Knowledge to user's LTM + if new_user_private_knowledge and new_user_private_knowledge.lower() != "none": + for line in new_user_private_knowledge.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.user_long_term_memory.add_user_knowledge(line.strip()) + + # Add Assistant Knowledge to assistant's LTM + if new_assistant_knowledge and new_assistant_knowledge.lower() != "none": + for line in new_assistant_knowledge.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.assistant_long_term_memory.add_assistant_knowledge(line.strip()) # Save to dedicated assistant LTM + + # Mark pages as analyzed and reset session heat contributors + for p in session["details"]: + p["analyzed"] = True # Mark all pages in session, or just unanalyzed_pages? 
+ # Original code marked all pages in session + + session["N_visit"] = 0 # Reset visits after analysis + session["L_interaction"] = 0 # Reset interaction length contribution + # session["R_recency"] = 1.0 # Recency will re-calculate naturally + session["H_segment"] = compute_segment_heat(session) # Recompute heat with reset factors + session["last_visit_time"] = get_timestamp() # Update last visit time + + self.mid_term_memory.rebuild_heap() # Heap needs rebuild due to H_segment change + self.mid_term_memory.save() + print(f"Memoryos: Profile/Knowledge update for session {sid} complete. Heat reset.") + else: + print(f"Memoryos: Hot session {sid} has no unanalyzed pages. Skipping profile update.") + else: + # print(f"Memoryos: Top session {sid} heat ({current_heat:.2f}) below threshold. No profile update.") + pass # No action if below threshold + + def add_memory(self, user_input: str, agent_response: str, timestamp: str = None, meta_data: dict = None): + """ + Adds a new QA pair (memory) to the system. + meta_data is not used in the current refactoring but kept for future use. + """ + if not timestamp: + timestamp = get_timestamp() + + qa_pair = { + "user_input": user_input, + "agent_response": agent_response, + "timestamp": timestamp + # meta_data can be added here if it needs to be stored with the QA pair + } + self.short_term_memory.add_qa_pair(qa_pair) + print(f"Memoryos: Added QA to short-term. User: {user_input[:30]}...") + + if self.short_term_memory.is_full(): + print("Memoryos: Short-term memory full. Processing to mid-term.") + self.updater.process_short_term_to_mid_term() + + # After any memory addition that might impact mid-term, check for profile updates + self._trigger_profile_and_knowledge_update_if_needed() + + def get_response(self, query: str, relationship_with_user="friend", style_hint="", user_conversation_meta_data: dict = None) -> str: + """ + Generates a response to the user's query, incorporating memory and context. + """ + print(f"Memoryos: Generating response for query: '{query[:50]}...'") + + # 1. Retrieve context + retrieval_results = self.retriever.retrieve_context( + user_query=query, + user_id=self.user_id + # Using default thresholds from Retriever class for now + ) + retrieved_pages = retrieval_results["retrieved_pages"] + retrieved_user_knowledge = retrieval_results["retrieved_user_knowledge"] + retrieved_assistant_knowledge = retrieval_results["retrieved_assistant_knowledge"] + + # 2. Get short-term history + short_term_history = self.short_term_memory.get_all() + history_text = "\n".join([ + f"User: {qa.get('user_input', '')}\nAssistant: {qa.get('agent_response', '')} (Time: {qa.get('timestamp', '')})" + for qa in short_term_history + ]) + + # 3. Format retrieved mid-term pages (retrieval_queue equivalent) + retrieval_text = "\n".join([ + f"【Historical Memory】\nUser: {page.get('user_input', '')}\nAssistant: {page.get('agent_response', '')}\nTime: {page.get('timestamp', '')}\nConversation chain overview: {page.get('meta_info','N/A')}" + for page in retrieved_pages + ]) + + # 4. Get user profile + user_profile_text = self.user_long_term_memory.get_raw_user_profile(self.user_id) + if not user_profile_text or user_profile_text.lower() == "none": + user_profile_text = "No detailed profile available yet." + + # 5. 
Format retrieved user knowledge for background + user_knowledge_background = "" + if retrieved_user_knowledge: + user_knowledge_background = "\n【Relevant User Knowledge Entries】\n" + for kn_entry in retrieved_user_knowledge: + user_knowledge_background += f"- {kn_entry['knowledge']} (Recorded: {kn_entry['timestamp']})\n" + + background_context = f"【User Profile】\n{user_profile_text}\n{user_knowledge_background}" + + # 6. Format retrieved Assistant Knowledge (from assistant's LTM) + # Use retrieved assistant knowledge instead of all assistant knowledge + assistant_knowledge_text_for_prompt = "【Assistant Knowledge Base】\n" + if retrieved_assistant_knowledge: + for ak_entry in retrieved_assistant_knowledge: + assistant_knowledge_text_for_prompt += f"- {ak_entry['knowledge']} (Recorded: {ak_entry['timestamp']})\n" + else: + assistant_knowledge_text_for_prompt += "- No relevant assistant knowledge found for this query.\n" + + # 7. Format user_conversation_meta_data (if provided) + meta_data_text_for_prompt = "【Current Conversation Metadata】\n" + if user_conversation_meta_data: + try: + meta_data_text_for_prompt += json.dumps(user_conversation_meta_data, ensure_ascii=False, indent=2) + except TypeError: + meta_data_text_for_prompt += str(user_conversation_meta_data) + else: + meta_data_text_for_prompt += "None provided for this turn." + + # 8. Construct Prompts + system_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT.format( + relationship=relationship_with_user, + assistant_knowledge_text=assistant_knowledge_text_for_prompt, + meta_data_text=meta_data_text_for_prompt # Using meta_data_text placeholder for user_conversation_meta_data + ) + + user_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_USER_PROMPT.format( + history_text=history_text, + retrieval_text=retrieval_text, + background=background_context, + relationship=relationship_with_user, + query=query + ) + + messages = [ + {"role": "system", "content": system_prompt_text}, + {"role": "user", "content": user_prompt_text} + ] + + # 9. Call LLM for response + print("Memoryos: Calling LLM for final response generation...") + # print("System Prompt:\n", system_prompt_text) + # print("User Prompt:\n", user_prompt_text) + response_content = self.client.chat_completion( + model=self.llm_model, + messages=messages, + temperature=0.7, + max_tokens=1500 # As in original main + ) + + # 10. Add this interaction to memory + self.add_memory(user_input=query, agent_response=response_content, timestamp=get_timestamp()) + + return response_content + + # --- Helper/Maintenance methods (optional additions) --- + def get_user_profile_summary(self) -> str: + return self.user_long_term_memory.get_raw_user_profile(self.user_id) + + def get_assistant_knowledge_summary(self) -> list: + return self.assistant_long_term_memory.get_assistant_knowledge() + + def force_mid_term_analysis(self): + """Forces analysis of all unanalyzed pages in the hottest mid-term segment if heat is above 0. + Useful for testing or manual triggering. 
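+
+        Example (hypothetical caller):
+            memoryos_instance.force_mid_term_analysis()  # analyze the hottest segment now; threshold is restored afterwards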
+ """ + original_threshold = self.mid_term_heat_threshold + self.mid_term_heat_threshold = 0.0 # Temporarily lower threshold + print("Memoryos: Force-triggering mid-term analysis...") + self._trigger_profile_and_knowledge_update_if_needed() + self.mid_term_heat_threshold = original_threshold # Restore original threshold + + def __repr__(self): + return f"" \ No newline at end of file diff --git a/memoryos-mcp/memoryos/mid_term.py b/memoryos-mcp/memoryos/mid_term.py new file mode 100644 index 0000000..29811bb --- /dev/null +++ b/memoryos-mcp/memoryos/mid_term.py @@ -0,0 +1,391 @@ +import json +import numpy as np +from collections import defaultdict +import faiss +import heapq +from datetime import datetime + +try: + from .utils import ( + get_timestamp, generate_id, get_embedding, normalize_vector, + compute_time_decay, ensure_directory_exists, OpenAIClient + ) +except ImportError: + from utils import ( + get_timestamp, generate_id, get_embedding, normalize_vector, + compute_time_decay, ensure_directory_exists, OpenAIClient + ) + +# Heat computation constants (can be tuned or made configurable) +HEAT_ALPHA = 1.0 +HEAT_BETA = 1.0 +HEAT_GAMMA = 1 +RECENCY_TAU_HOURS = 24 # For R_recency calculation in compute_segment_heat + +def compute_segment_heat(session, alpha=HEAT_ALPHA, beta=HEAT_BETA, gamma=HEAT_GAMMA, tau_hours=RECENCY_TAU_HOURS): + N_visit = session.get("N_visit", 0) + L_interaction = session.get("L_interaction", 0) + + # Calculate recency based on last_visit_time + R_recency = 1.0 # Default if no last_visit_time + if session.get("last_visit_time"): + R_recency = compute_time_decay(session["last_visit_time"], get_timestamp(), tau_hours) + + session["R_recency"] = R_recency # Update session's recency factor + return alpha * N_visit + beta * L_interaction + gamma * R_recency + +class MidTermMemory: + def __init__(self, file_path: str, client: OpenAIClient, max_capacity=2000, embedding_model_name: str = "all-MiniLM-L6-v2", embedding_model_kwargs: dict = None): + self.file_path = file_path + ensure_directory_exists(self.file_path) + self.client = client + self.max_capacity = max_capacity + self.sessions = {} # {session_id: session_object} + self.access_frequency = defaultdict(int) # {session_id: access_count_for_lfu} + self.heap = [] # Min-heap storing (-H_segment, session_id) for hottest segments + + self.embedding_model_name = embedding_model_name + self.embedding_model_kwargs = embedding_model_kwargs if embedding_model_kwargs is not None else {} + self.load() + + def get_page_by_id(self, page_id): + for session in self.sessions.values(): + for page in session.get("details", []): + if page.get("page_id") == page_id: + return page + return None + + def update_page_connections(self, prev_page_id, next_page_id): + if prev_page_id: + prev_page = self.get_page_by_id(prev_page_id) + if prev_page: + prev_page["next_page"] = next_page_id + if next_page_id: + next_page = self.get_page_by_id(next_page_id) + if next_page: + next_page["pre_page"] = prev_page_id + # self.save() # Avoid saving on every minor update; save at higher level operations + + def evict_lfu(self): + if not self.access_frequency or not self.sessions: + return + + lfu_sid = min(self.access_frequency, key=self.access_frequency.get) + print(f"MidTermMemory: LFU eviction. 
+
+class MidTermMemory:
+    def __init__(self, file_path: str, client: OpenAIClient, max_capacity=2000, embedding_model_name: str = "all-MiniLM-L6-v2", embedding_model_kwargs: dict = None):
+        self.file_path = file_path
+        ensure_directory_exists(self.file_path)
+        self.client = client
+        self.max_capacity = max_capacity
+        self.sessions = {} # {session_id: session_object}
+        self.access_frequency = defaultdict(int) # {session_id: access_count_for_lfu}
+        self.heap = [] # Min-heap storing (-H_segment, session_id) for hottest segments
+
+        self.embedding_model_name = embedding_model_name
+        self.embedding_model_kwargs = embedding_model_kwargs if embedding_model_kwargs is not None else {}
+        self.load()
+
+    def get_page_by_id(self, page_id):
+        for session in self.sessions.values():
+            for page in session.get("details", []):
+                if page.get("page_id") == page_id:
+                    return page
+        return None
+
+    def update_page_connections(self, prev_page_id, next_page_id):
+        if prev_page_id:
+            prev_page = self.get_page_by_id(prev_page_id)
+            if prev_page:
+                prev_page["next_page"] = next_page_id
+        if next_page_id:
+            next_page = self.get_page_by_id(next_page_id)
+            if next_page:
+                next_page["pre_page"] = prev_page_id
+        # self.save() # Avoid saving on every minor update; save at higher level operations
+
+    def evict_lfu(self):
+        if not self.access_frequency or not self.sessions:
+            return
+
+        lfu_sid = min(self.access_frequency, key=self.access_frequency.get)
+        print(f"MidTermMemory: LFU eviction. Session {lfu_sid} has lowest access frequency.")
+
+        if lfu_sid not in self.sessions:
+            del self.access_frequency[lfu_sid] # Clean up access frequency if session already gone
+            self.rebuild_heap()
+            return
+
+        session_to_delete = self.sessions.pop(lfu_sid) # Remove from sessions
+        del self.access_frequency[lfu_sid] # Remove from LFU tracking
+
+        # Clean up page connections if this session's pages were linked
+        for page in session_to_delete.get("details", []):
+            prev_page_id = page.get("pre_page")
+            next_page_id = page.get("next_page")
+            # If a page from this session was linked to an external page, nullify the external link
+            if prev_page_id and not self.get_page_by_id(prev_page_id): # Check if prev page is still in memory
+                # This case should ideally not happen if connections are within sessions or handled carefully
+                pass
+            if next_page_id and not self.get_page_by_id(next_page_id):
+                pass
+            # More robustly, one might need to search all other sessions if inter-session linking was allowed
+            # For now, assuming internal consistency or that MemoryOS class manages higher-level links
+
+        self.rebuild_heap()
+        self.save()
+        print(f"MidTermMemory: Evicted session {lfu_sid}.")
+
+    def add_session(self, summary, details, summary_keywords=None):
+        session_id = generate_id("session")
+        summary_vec = get_embedding(
+            summary,
+            model_name=self.embedding_model_name,
+            **self.embedding_model_kwargs
+        )
+        summary_vec = normalize_vector(summary_vec).tolist()
+        summary_keywords = summary_keywords if summary_keywords is not None else []
+
+        processed_details = []
+        for page_data in details:
+            page_id = page_data.get("page_id", generate_id("page"))
+
+            # Check for an existing embedding to avoid recomputation
+            if "page_embedding" in page_data and page_data["page_embedding"]:
+                print(f"MidTermMemory: Reusing existing embedding for page {page_id}")
+                inp_vec = page_data["page_embedding"]
+                # Make sure the embedding is normalized
+                if isinstance(inp_vec, list):
+                    inp_vec_np = np.array(inp_vec, dtype=np.float32)
+                    if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Re-normalize only if clearly off unit length
+                        inp_vec = normalize_vector(inp_vec_np).tolist()
+            else:
+                print(f"MidTermMemory: Computing new embedding for page {page_id}")
+                full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
+                inp_vec = get_embedding(
+                    full_text,
+                    model_name=self.embedding_model_name,
+                    **self.embedding_model_kwargs
+                )
+                inp_vec = normalize_vector(inp_vec).tolist()
+
+            # Use existing keywords, or leave empty (to be filled by multi-summary)
+            if "page_keywords" in page_data and page_data["page_keywords"]:
+                print(f"MidTermMemory: Using existing keywords for page {page_id}")
+                page_keywords = page_data["page_keywords"]
+            else:
+                print(f"MidTermMemory: Setting empty keywords for page {page_id} (will be filled by multi-summary)")
+                page_keywords = []
+
+            processed_page = {
+                **page_data, # Carry over existing fields like user_input, agent_response, timestamp
+                "page_id": page_id,
+                "page_embedding": inp_vec,
+                "page_keywords": page_keywords,
+                "preloaded": page_data.get("preloaded", False), # Preserve if passed
+                "analyzed": page_data.get("analyzed", False), # Preserve if passed
+                # pre_page, next_page, meta_info are handled by DynamicUpdater
+            }
+            processed_details.append(processed_page)
+
+        current_ts = get_timestamp()
+        session_obj = {
+            "id": session_id,
+            "summary": summary,
+            "summary_keywords": summary_keywords,
+            "summary_embedding": summary_vec,
+            "details": processed_details,
+            "L_interaction": len(processed_details),
+            "R_recency": 1.0, # Initial recency
"N_visit": 0, + "H_segment": 0.0, # Initial heat, will be computed + "timestamp": current_ts, # Creation timestamp + "last_visit_time": current_ts, # Also initial last_visit_time for recency calc + "access_count_lfu": 0 # For LFU eviction policy + } + session_obj["H_segment"] = compute_segment_heat(session_obj) + self.sessions[session_id] = session_obj + self.access_frequency[session_id] = 0 # Initialize for LFU + heapq.heappush(self.heap, (-session_obj["H_segment"], session_id)) # Use negative heat for max-heap behavior + + print(f"MidTermMemory: Added new session {session_id}. Initial heat: {session_obj['H_segment']:.2f}.") + if len(self.sessions) > self.max_capacity: + self.evict_lfu() + self.save() + return session_id + + def rebuild_heap(self): + self.heap = [] + for sid, session_data in self.sessions.items(): + # Ensure H_segment is up-to-date before rebuilding heap if necessary + # session_data["H_segment"] = compute_segment_heat(session_data) + heapq.heappush(self.heap, (-session_data["H_segment"], sid)) + # heapq.heapify(self.heap) # Not needed if pushing one by one + # No save here, it's an internal operation often followed by other ops that save + + def insert_pages_into_session(self, summary_for_new_pages, keywords_for_new_pages, pages_to_insert, + similarity_threshold=0.6, keyword_similarity_alpha=1.0): + if not self.sessions: # If no existing sessions, just add as a new one + print("MidTermMemory: No existing sessions. Adding new session directly.") + return self.add_session(summary_for_new_pages, pages_to_insert, keywords_for_new_pages) + + new_summary_vec = get_embedding( + summary_for_new_pages, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + new_summary_vec = normalize_vector(new_summary_vec) + + best_sid = None + best_overall_score = -1 + + for sid, existing_session in self.sessions.items(): + existing_summary_vec = np.array(existing_session["summary_embedding"], dtype=np.float32) + semantic_sim = float(np.dot(existing_summary_vec, new_summary_vec)) + + # Keyword similarity (Jaccard index based) + existing_keywords = set(existing_session.get("summary_keywords", [])) + new_keywords_set = set(keywords_for_new_pages) + s_topic_keywords = 0 + if existing_keywords and new_keywords_set: + intersection = len(existing_keywords.intersection(new_keywords_set)) + union = len(existing_keywords.union(new_keywords_set)) + if union > 0: + s_topic_keywords = intersection / union + + overall_score = semantic_sim + keyword_similarity_alpha * s_topic_keywords + + if overall_score > best_overall_score: + best_overall_score = overall_score + best_sid = sid + + if best_sid and best_overall_score >= similarity_threshold: + print(f"MidTermMemory: Merging pages into session {best_sid}. 
Score: {best_overall_score:.2f} (Threshold: {similarity_threshold})")
+            target_session = self.sessions[best_sid]
+
+            processed_new_pages = []
+            for page_data in pages_to_insert:
+                page_id = page_data.get("page_id", generate_id("page")) # Use existing or generate new ID
+
+                # Check for an existing embedding to avoid recomputation
+                if "page_embedding" in page_data and page_data["page_embedding"]:
+                    print(f"MidTermMemory: Reusing existing embedding for page {page_id}")
+                    inp_vec = page_data["page_embedding"]
+                    # Make sure the embedding is normalized
+                    if isinstance(inp_vec, list):
+                        inp_vec_np = np.array(inp_vec, dtype=np.float32)
+                        if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Re-normalize only if clearly off unit length
+                            inp_vec = normalize_vector(inp_vec_np).tolist()
+                else:
+                    print(f"MidTermMemory: Computing new embedding for page {page_id}")
+                    full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
+                    inp_vec = get_embedding(
+                        full_text,
+                        model_name=self.embedding_model_name,
+                        **self.embedding_model_kwargs
+                    )
+                    inp_vec = normalize_vector(inp_vec).tolist()
+
+                # Use existing keywords, or inherit the session's keywords
+                if "page_keywords" in page_data and page_data["page_keywords"]:
+                    print(f"MidTermMemory: Using existing keywords for page {page_id}")
+                    page_keywords_current = page_data["page_keywords"]
+                else:
+                    print(f"MidTermMemory: Using session keywords for page {page_id}")
+                    page_keywords_current = keywords_for_new_pages
+
+                processed_page = {
+                    **page_data, # Carry over existing fields
+                    "page_id": page_id,
+                    "page_embedding": inp_vec,
+                    "page_keywords": page_keywords_current,
+                    # analyzed, preloaded flags should be part of page_data if set
+                }
+                target_session["details"].append(processed_page)
+                processed_new_pages.append(processed_page)
+
+            target_session["L_interaction"] += len(pages_to_insert)
+            target_session["last_visit_time"] = get_timestamp() # Update last visit time on modification
+            target_session["H_segment"] = compute_segment_heat(target_session)
+            self.rebuild_heap() # Rebuild heap as heat has changed
+            self.save()
+            return best_sid
+        else:
+            print(f"MidTermMemory: No suitable session to merge (best score {best_overall_score:.2f} < threshold {similarity_threshold}). 
Creating new session.") + return self.add_session(summary_for_new_pages, pages_to_insert, keywords_for_new_pages) + + def search_sessions(self, query_text, segment_similarity_threshold=0.0, page_similarity_threshold=0.0, + top_k_sessions=6, keyword_alpha=1.0, recency_tau_search=3600): + if not self.sessions: + return [] + + query_vec = get_embedding( + query_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + query_vec = normalize_vector(query_vec) + query_keywords = set() # Keywords extraction removed, relying on semantic similarity + + candidate_sessions = [] + session_ids = list(self.sessions.keys()) + if not session_ids: return [] + + summary_embeddings_list = [self.sessions[s]["summary_embedding"] for s in session_ids] + summary_embeddings_np = np.array(summary_embeddings_list, dtype=np.float32) + + dim = summary_embeddings_np.shape[1] + index = faiss.IndexFlatIP(dim) # Inner product for similarity + index.add(summary_embeddings_np) + + query_arr_np = np.array([query_vec], dtype=np.float32) + distances, indices = index.search(query_arr_np, min(top_k_sessions, len(session_ids))) + + results = [] + current_time_str = get_timestamp() + + for i, idx in enumerate(indices[0]): + if idx == -1: continue + + session_id = session_ids[idx] + session = self.sessions[session_id] + semantic_sim_score = float(distances[0][i]) # This is the dot product + + # Keyword similarity for session summary + session_keywords = set(session.get("summary_keywords", [])) + s_topic_keywords = 0 + if query_keywords and session_keywords: + intersection = len(query_keywords.intersection(session_keywords)) + union = len(query_keywords.union(session_keywords)) + if union > 0: s_topic_keywords = intersection / union + + # Time decay for session recency in search scoring + # time_decay_factor = compute_time_decay(session["timestamp"], current_time_str, tau_hours=recency_tau_search) + + # Combined score for session relevance + session_relevance_score = (semantic_sim_score + keyword_alpha * s_topic_keywords) + + if session_relevance_score >= segment_similarity_threshold: + matched_pages_in_session = [] + for page in session.get("details", []): + page_embedding = np.array(page["page_embedding"], dtype=np.float32) + # page_keywords = set(page.get("page_keywords", [])) + + page_sim_score = float(np.dot(page_embedding, query_vec)) + # Can also add keyword sim for pages if needed, but keeping it simpler for now + + if page_sim_score >= page_similarity_threshold: + matched_pages_in_session.append({"page_data": page, "score": page_sim_score}) + + if matched_pages_in_session: + # Update session access stats + session["N_visit"] += 1 + session["last_visit_time"] = current_time_str + session["access_count_lfu"] = session.get("access_count_lfu", 0) + 1 + self.access_frequency[session_id] = session["access_count_lfu"] + session["H_segment"] = compute_segment_heat(session) + self.rebuild_heap() # Heat changed + + results.append({ + "session_id": session_id, + "session_summary": session["summary"], + "session_relevance_score": session_relevance_score, + "matched_pages": sorted(matched_pages_in_session, key=lambda x: x["score"], reverse=True) # Sort pages by score + }) + + self.save() # Save changes from access updates + # Sort final results by session_relevance_score + return sorted(results, key=lambda x: x["session_relevance_score"], reverse=True) + + def save(self): + # Make a copy for saving to avoid modifying heap during iteration if it happens + # Though current heap is list of tuples, so direct 
modification risk is low + # sessions_to_save = {sid: data for sid, data in self.sessions.items()} + data_to_save = { + "sessions": self.sessions, + "access_frequency": dict(self.access_frequency), # Convert defaultdict to dict for JSON + # Heap is derived, no need to save typically, but can if desired for faster load + # "heap_snapshot": self.heap + } + try: + with open(self.file_path, "w", encoding="utf-8") as f: + json.dump(data_to_save, f, ensure_ascii=False, indent=2) + except IOError as e: + print(f"Error saving MidTermMemory to {self.file_path}: {e}") + + def load(self): + try: + with open(self.file_path, "r", encoding="utf-8") as f: + data = json.load(f) + self.sessions = data.get("sessions", {}) + self.access_frequency = defaultdict(int, data.get("access_frequency", {})) + self.rebuild_heap() # Rebuild heap from loaded sessions + print(f"MidTermMemory: Loaded from {self.file_path}. Sessions: {len(self.sessions)}.") + except FileNotFoundError: + print(f"MidTermMemory: No history file found at {self.file_path}. Initializing new memory.") + except json.JSONDecodeError: + print(f"MidTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") + except Exception as e: + print(f"MidTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-mcp/memoryos/prompts.py b/memoryos-mcp/memoryos/prompts.py new file mode 100644 index 0000000..46d68fb --- /dev/null +++ b/memoryos-mcp/memoryos/prompts.py @@ -0,0 +1,235 @@ +""" +This file stores all the prompts used by the Memoryos system. +""" + +# Prompt for generating system response (from main_memoybank.py, generate_system_response_with_meta) +GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT = ( + "As a communication expert with outstanding communication habits, you embody the role of {relationship} throughout the following dialogues.\n" + "Here are some of your distinctive personal traits and knowledge:\n{assistant_knowledge_text}\n" + "User's profile:\n" + "{meta_data_text}\n" + "Your task is to generate responses that align with these traits and maintain the tone.\n" +) + +GENERATE_SYSTEM_RESPONSE_USER_PROMPT = ( + "\n" + "Drawing from your recent conversation with the user:\n" + "{history_text}\n\n" + "\n" + "The memories linked to the ongoing conversation are:\n" + "{retrieval_text}\n\n" + "\n" + "During the conversation process between you and the user in the past, you found that the user has the following characteristics:\n" + "{background}\n\n" + "Now, please role-play as {relationship} to continue the dialogue between you and the user.\n" + "The user just said: {query}\n" + "Please respond to the user's statement using the following format (maximum 30 words, must be in English):\n " + "When answering questions, be sure to check whether the timestamp of the referenced information matches the timeframe of the question" +) + +# Prompt for assistant knowledge extraction (from utils.py, analyze_assistant_knowledge) +ASSISTANT_KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are an assistant knowledge extraction engine. Rules: +1. Extract ONLY explicit statements about the assistant's identity or knowledge. +2. Use concise and factual statements in the first person. +3. If no relevant information is found, output "None".""" + +ASSISTANT_KNOWLEDGE_EXTRACTION_USER_PROMPT = """ +# Assistant Knowledge Extraction Task +Analyze the conversation and extract any fact or identity traits about the assistant. 
+If no traits can be extracted, reply with "None". Use the following format for output:
+The generated content should be as concise as possible — the more concise, the better.
+【Assistant Knowledge】
+- [Fact 1]
+- [Fact 2]
+- (Or "None" if none found)
+
+Few-shot examples:
+1. User: Can you recommend some movies?
+   AI: Yes, I recommend Interstellar.
+   Time: 2023-10-01
+   【Assistant Knowledge】
+   - I recommend Interstellar on 2023-10-01.
+
+2. User: Can you help me with cooking recipes?
+   AI: Yes, I have extensive knowledge of cooking recipes and techniques.
+   Time: 2023-10-02
+   【Assistant Knowledge】
+   - I have cooking recipes and techniques on 2023-10-02.
+
+3. User: That's interesting. I didn't know you could do that.
+   AI: I'm glad you find it interesting!
+   【Assistant Knowledge】
+   - None
+
+Conversation:
+{conversation}
+"""
+
+# Prompt for summarizing dialogs (from utils.py, gpt_summarize)
+SUMMARIZE_DIALOGS_SYSTEM_PROMPT = "You are an expert in summarizing dialogue topics. Generate extremely concise and precise summaries. Be as brief as possible while capturing the essence."
+SUMMARIZE_DIALOGS_USER_PROMPT = "Please generate a concise topic summary based on the following conversation. Keep it to 2-3 short sentences maximum:\n{dialog_text}\nConcise Summary:"
+
+# Prompt for multi-summary generation (from utils.py, gpt_generate_multi_summary)
+MULTI_SUMMARY_SYSTEM_PROMPT = "You are an expert in analyzing dialogue topics. Generate concise summaries. No more than two topics. Be as brief as possible."
+MULTI_SUMMARY_USER_PROMPT = ("Please analyze the following dialogue and generate extremely concise subtopic summaries (if applicable), with a maximum of two themes.\n"
+                             "Each summary should be very brief - just a few words for the theme and content. Format as JSON array:\n"
+                             "[\n  {{\"theme\": \"Brief theme\", \"keywords\": [\"key1\", \"key2\"], \"content\": \"summary\"}}\n]\n"
+                             "\nConversation content:\n{text}")
+
+# Prompt for personality analysis (NEW TEMPLATE)
+PERSONALITY_ANALYSIS_SYSTEM_PROMPT = """You are a professional user preference analysis assistant. Your task is to analyze the user's personality preferences from the given dialogue based on the provided dimensions.
+
+For each dimension:
+1. Carefully read the conversation and determine if the dimension is reflected.
+2. If reflected, determine the user's preference level: High / Medium / Low, and briefly explain the reasoning, including time, people, and context if possible.
+3. If the dimension is not reflected, do not extract or list it.
+
+Focus only on the user's preferences and traits for the personality analysis section.
+Output only the user profile section.
+"""
+
+PERSONALITY_ANALYSIS_USER_PROMPT = """Please analyze the latest user-AI conversation below and update the user profile based on the 90 personality preference dimensions.
+
+Here are the 90 dimensions and their explanations:
+
+[Psychological Model (Basic Needs & Personality)]
+Extraversion: Preference for social activities.
+Openness: Willingness to embrace new ideas and experiences.
+Agreeableness: Tendency to be friendly and cooperative.
+Conscientiousness: Responsibility and organizational ability.
+Neuroticism: Emotional stability and sensitivity.
+Physiological Needs: Concern for comfort and basic needs.
+Need for Security: Emphasis on safety and stability.
+Need for Belonging: Desire for group affiliation.
+Need for Self-Esteem: Need for respect and recognition.
+Cognitive Needs: Desire for knowledge and understanding.
+Aesthetic Appreciation: Appreciation for beauty and art. +Self-Actualization: Pursuit of one's full potential. +Need for Order: Preference for cleanliness and organization. +Need for Autonomy: Preference for independent decision-making and action. +Need for Power: Desire to influence or control others. +Need for Achievement: Value placed on accomplishments. + +[AI Alignment Dimensions] +Helpfulness: Whether the AI's response is practically useful to the user. (This reflects user's expectation of AI) +Honesty: Whether the AI's response is truthful. (This reflects user's expectation of AI) +Safety: Avoidance of sensitive or harmful content. (This reflects user's expectation of AI) +Instruction Compliance: Strict adherence to user instructions. (This reflects user's expectation of AI) +Truthfulness: Accuracy and authenticity of content. (This reflects user's expectation of AI) +Coherence: Clarity and logical consistency of expression. (This reflects user's expectation of AI) +Complexity: Preference for detailed and complex information. +Conciseness: Preference for brief and clear responses. + +[Content Platform Interest Tags] +Science Interest: Interest in science topics. +Education Interest: Concern with education and learning. +Psychology Interest: Interest in psychology topics. +Family Concern: Interest in family and parenting. +Fashion Interest: Interest in fashion topics. +Art Interest: Engagement with or interest in art. +Health Concern: Concern with physical health and lifestyle. +Financial Management Interest: Interest in finance and budgeting. +Sports Interest: Interest in sports and physical activity. +Food Interest: Passion for cooking and cuisine. +Travel Interest: Interest in traveling and exploring new places. +Music Interest: Interest in music appreciation or creation. +Literature Interest: Interest in literature and reading. +Film Interest: Interest in movies and cinema. +Social Media Activity: Frequency and engagement with social media. +Tech Interest: Interest in technology and innovation. +Environmental Concern: Attention to environmental and sustainability issues. +History Interest: Interest in historical knowledge and topics. +Political Concern: Interest in political and social issues. +Religious Interest: Interest in religion and spirituality. +Gaming Interest: Enjoyment of video games or board games. +Animal Concern: Concern for animals or pets. +Emotional Expression: Preference for direct vs. restrained emotional expression. +Sense of Humor: Preference for humorous or serious communication style. +Information Density: Preference for detailed vs. concise information. +Language Style: Preference for formal vs. casual tone. +Practicality: Preference for practical advice vs. theoretical discussion. + +**Task Instructions:** +1. Review the existing user profile below +2. Analyze the new conversation for evidence of the 90 dimensions above +3. Update and integrate the findings into a comprehensive user profile +4. For each dimension that can be identified, use the format: Dimension ( Level(High/Medium/Low) ) +5. Include brief reasoning for each dimension when possible +6. Maintain existing insights from the old profile while incorporating new observations +7. 
If a dimension cannot be inferred from either the old profile or new conversation, do not include it + +**Existing User Profile:** +{existing_user_profile} + +**Latest User-AI Conversation:** +{conversation} + +**Updated User Profile:** +Please provide the comprehensive updated user profile below, combining insights from both the existing profile and new conversation:""" + +# Prompt for knowledge extraction (NEW) +KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are a knowledge extraction assistant. Your task is to extract user private data and assistant knowledge from conversations. + +Focus on: +1. User private data: personal information, preferences, or private facts about the user +2. Assistant knowledge: explicit statements about what the assistant did, provided, or demonstrated + +Be extremely concise and factual in your extractions. Use the shortest possible phrases. +""" + +KNOWLEDGE_EXTRACTION_USER_PROMPT = """Please extract user private data and assistant knowledge from the latest user-AI conversation below. + +Latest User-AI Conversation: +{conversation} + +【User Private Data】 +Extract personal information about the user. Be extremely concise - use shortest possible phrases: +- [Brief fact]: [Minimal context(Including entities and time)] +- [Brief fact]: [Minimal context(Including entities and time)] +- (If no private data found, write "None") + +【Assistant Knowledge】 +Extract what the assistant demonstrated. Use format "Assistant [action] at [time]". Be extremely brief: +- Assistant [brief action] at [time/context] +- Assistant [brief capability] during [brief context] +- (If no assistant knowledge found, write "None") +""" + +# Prompt for updating user profile (from utils.py, gpt_update_profile) +UPDATE_PROFILE_SYSTEM_PROMPT = "You are an expert in merging and updating user profiles. Integrate the new information into the old profile, maintaining consistency and improving the overall understanding of the user. Avoid redundancy. The new analysis is based on specific dimensions, try to incorporate these insights meaningfully." +UPDATE_PROFILE_USER_PROMPT = "Please update the following user profile based on the new analysis. If the old profile is empty or \"None\", create a new one based on the new analysis.\n\nOld User Profile:\n{old_profile}\n\nNew Analysis Data:\n{new_analysis}\n\nUpdated User Profile:" + +# Prompt for extracting theme (from utils.py, gpt_extract_theme) +EXTRACT_THEME_SYSTEM_PROMPT = "You are an expert in extracting the main theme from a text. Provide a concise theme." +EXTRACT_THEME_USER_PROMPT = "Please extract the main theme from the following text:\n{answer_text}\n\nTheme:" + +# Prompt for extracting keywords (from utils.py, llm_extract_keywords) +EXTRACT_KEYWORDS_SYSTEM_PROMPT = "You are an expert in keyword extraction. Extract only the most essential keywords from the text. Return 3-5 keywords maximum as a comma-separated list. Be extremely selective." +EXTRACT_KEYWORDS_USER_PROMPT = "Please extract the 3-5 most important keywords from the following text. Be very selective and concise:\n{text}\n\nKeywords:" + +# Prompt for conversation continuity check (from dynamic_update.py, _is_conversation_continuing) +CONTINUITY_CHECK_SYSTEM_PROMPT = "You are a conversation continuity detector. Return ONLY 'true' or 'false'." 
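+# check_conversation_continuity() in utils.py fills in the user prompt below and treats any
+# reply other than the literal string "true" (case-insensitive) as a topic shift.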
+CONTINUITY_CHECK_USER_PROMPT = ("Determine if these two conversation pages are continuous (true continuation without topic shift).\n" + "Return ONLY \"true\" or \"false\".\n\n" + "Previous Page:\nUser: {prev_user}\nAssistant: {prev_agent}\n\n" + "Current Page:\nUser: {curr_user}\nAssistant: {curr_agent}\n\n" + "Continuous?") + +# Prompt for generating meta info (from dynamic_update.py, _generate_meta_info) +META_INFO_SYSTEM_PROMPT = ("""You are a conversation meta-summary updater. Your task is to: +1. Preserve relevant context from previous meta-summary +2. Integrate new information from current dialogue +3. Output ONLY the updated summary (no explanations)""" ) +META_INFO_USER_PROMPT = ("""Update the conversation meta-summary by incorporating the new dialogue while maintaining continuity. + + Guidelines: + 1. Start from the previous meta-summary (if exists) + 2. Add/update information based on the new dialogue + 3. Keep it concise (1-2 sentences max) + 4. Maintain context coherence + + Previous Meta-summary: {last_meta} + New Dialogue: + {new_dialogue} + + Updated Meta-summary:""") \ No newline at end of file diff --git a/memoryos-mcp/memoryos/requirements.txt b/memoryos-mcp/memoryos/requirements.txt new file mode 100644 index 0000000..0c8a8d4 --- /dev/null +++ b/memoryos-mcp/memoryos/requirements.txt @@ -0,0 +1,23 @@ +# MemoryOS Core Dependencies +# Core scientific computing and ML libraries +numpy==1.24.* +sentence-transformers>=2.7.0,<3.0.0 # Updated for Qwen model support +transformers>=4.51.0 # Required for newer sentence-transformer features +FlagEmbedding>=1.2.9 # For BGE-M3 model support + +faiss-gpu>=1.7.0,<2.0.0 +httpx[socks] +openai +# Web framework (for demo) +flask>=2.0.0,<3.0.0 + +# Optional utilities +python-dotenv>=0.19.0,<2.0.0 + +# Development and testing (optional) +# pytest>=7.0.0,<8.0.0 +# pytest-asyncio>=0.20.0,<1.0.0 + +# Additional dependencies for compatibility +typing-extensions>=4.0.0,<5.0.0 +regex>=2022.1.18 diff --git a/memoryos-mcp/memoryos/retriever.py b/memoryos-mcp/memoryos/retriever.py new file mode 100644 index 0000000..1ea1568 --- /dev/null +++ b/memoryos-mcp/memoryos/retriever.py @@ -0,0 +1,131 @@ +from collections import deque +import heapq +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Optional + +try: + from .utils import get_timestamp, OpenAIClient, run_parallel_tasks + from .short_term import ShortTermMemory + from .mid_term import MidTermMemory + from .long_term import LongTermMemory +except ImportError: + from utils import get_timestamp, OpenAIClient, run_parallel_tasks + from short_term import ShortTermMemory + from mid_term import MidTermMemory + from long_term import LongTermMemory +# from .updater import Updater # Updater is not directly used by Retriever + +class Retriever: + def __init__(self, + mid_term_memory: MidTermMemory, + long_term_memory: LongTermMemory, + assistant_long_term_memory: Optional[LongTermMemory] = None, # Add assistant LTM + # client: OpenAIClient, # Not strictly needed if all LLM calls are within memory modules + queue_capacity=7): # Default from main_memoybank was 7 for retrieval_queue + # Short term memory is usually for direct context, not primary retrieval source here + # self.short_term_memory = short_term_memory + self.mid_term_memory = mid_term_memory + self.long_term_memory = long_term_memory + self.assistant_long_term_memory = assistant_long_term_memory # Store assistant LTM reference + # self.client = client + self.retrieval_queue_capacity = queue_capacity + # 
self.retrieval_queue = deque(maxlen=queue_capacity) # This was instance level, but retrieve returns it, so maybe not needed as instance var
+
+    def _retrieve_mid_term_context(self, user_query, segment_similarity_threshold, page_similarity_threshold, top_k_sessions):
+        """Parallel task: retrieve from mid-term memory."""
+        print("Retriever: Searching mid-term memory...")
+        matched_sessions = self.mid_term_memory.search_sessions(
+            query_text=user_query,
+            segment_similarity_threshold=segment_similarity_threshold,
+            page_similarity_threshold=page_similarity_threshold,
+            top_k_sessions=top_k_sessions
+        )
+
+        # Use a heap to get top N pages across all relevant sessions based on their scores
+        top_pages_heap = []
+        page_counter = 0  # Add counter to ensure unique comparison
+        for session_match in matched_sessions:
+            for page_match in session_match.get("matched_pages", []):
+                page_data = page_match["page_data"]
+                page_score = page_match["score"]  # Using the page score directly
+
+                # Add session relevance score to page score or combine them?
+                # For now, using page_score. Could be: page_score * session_match["session_relevance_score"]
+                combined_score = page_score  # Potentially adjust with session_relevance_score
+
+                if len(top_pages_heap) < self.retrieval_queue_capacity:
+                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
+                    page_counter += 1
+                elif combined_score > top_pages_heap[0][0]:  # If current page is better than the worst in heap
+                    heapq.heappop(top_pages_heap)
+                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
+                    page_counter += 1
+
+        # top_pages_heap is a min-heap (smallest score at index 0), not a sorted list,
+        # so sort the entries explicitly by score, highest first.
+        retrieved_pages = [item[2] for item in sorted(top_pages_heap, key=lambda x: x[0], reverse=True)]
+        print(f"Retriever: Mid-term memory recalled {len(retrieved_pages)} pages.")
+        return retrieved_pages
+
+    def _retrieve_user_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
+        """Parallel task: search the user's long-term knowledge."""
+        print("Retriever: Searching user long-term knowledge...")
+        retrieved_knowledge = self.long_term_memory.search_user_knowledge(
+            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
+        )
+        print(f"Retriever: Long-term user knowledge recalled {len(retrieved_knowledge)} items.")
+        return retrieved_knowledge
+
+    def _retrieve_assistant_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
+        """Parallel task: search the assistant's long-term knowledge."""
+        if not self.assistant_long_term_memory:
+            print("Retriever: No assistant long-term memory provided, skipping assistant knowledge retrieval.")
+            return []
+
+        print("Retriever: Searching assistant long-term knowledge...")
+        retrieved_knowledge = self.assistant_long_term_memory.search_assistant_knowledge(
+            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
+        )
+        print(f"Retriever: Long-term assistant knowledge recalled {len(retrieved_knowledge)} items.")
+        return retrieved_knowledge
+
+    def retrieve_context(self, user_query: str,
+                         user_id: str, # Needed for profile, can be used for context filtering if desired
+                         segment_similarity_threshold=0.1, # From main_memoybank example
+                         page_similarity_threshold=0.1, # From main_memoybank example
+                         knowledge_threshold=0.01, # From main_memoybank example
+                         top_k_sessions=5, # From MidTermMemory search default
+                         top_k_knowledge=20 # Default for knowledge search
+                        ):
+        print(f"Retriever: Starting PARALLEL retrieval for query: '{user_query[:50]}...'")
+
+        # Run the three retrieval tasks in parallel
+        tasks = [
+            lambda: self._retrieve_mid_term_context(user_query, segment_similarity_threshold, page_similarity_threshold, top_k_sessions),
+            lambda: self._retrieve_user_knowledge(user_query, knowledge_threshold, top_k_knowledge),
+            lambda: self._retrieve_assistant_knowledge(user_query, knowledge_threshold, top_k_knowledge)
+        ]
+
+        # Execute the tasks on a small thread pool
+        with ThreadPoolExecutor(max_workers=3) as executor:
+            futures = []
+            for i, task in enumerate(tasks):
+                future = executor.submit(task)
+                futures.append((i, future))
+
+            results = [None] * 3
+            for task_idx, future in futures:
+                try:
+                    results[task_idx] = future.result()
+                except Exception as e:
+                    print(f"Error in retrieval task {task_idx}: {e}")
+                    results[task_idx] = []
+
+        retrieved_mid_term_pages, retrieved_user_knowledge, retrieved_assistant_knowledge = results
+
+        return {
+            "retrieved_pages": retrieved_mid_term_pages or [], # List of page dicts
+            "retrieved_user_knowledge": retrieved_user_knowledge or [], # List of knowledge entry dicts
+            "retrieved_assistant_knowledge": retrieved_assistant_knowledge or [], # List of assistant knowledge entry dicts
+            "retrieved_at": get_timestamp()
+        }
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/short_term.py b/memoryos-mcp/memoryos/short_term.py
new file mode 100644
index 0000000..37ffddc
--- /dev/null
+++ b/memoryos-mcp/memoryos/short_term.py
@@ -0,0 +1,64 @@
+import json
+from collections import deque
+try:
+    from .utils import get_timestamp, ensure_directory_exists
+except ImportError:
+    from utils import get_timestamp, ensure_directory_exists
+
+class ShortTermMemory:
+    def __init__(self, file_path, max_capacity=10):
+        self.max_capacity = max_capacity
+        self.file_path = file_path
+        ensure_directory_exists(self.file_path)
+        self.memory = deque(maxlen=max_capacity)
+        self.load()
+
+    def add_qa_pair(self, qa_pair):
+        # Ensure timestamp exists, add if not
+        if 'timestamp' not in qa_pair or not qa_pair['timestamp']:
+            qa_pair["timestamp"] = get_timestamp()
+
+        self.memory.append(qa_pair)
+        print(f"ShortTermMemory: Added QA. User: {qa_pair.get('user_input','')[:30]}...")
+        self.save()
+
+    def get_all(self):
+        return list(self.memory)
+
+    def is_full(self):
+        return len(self.memory) >= self.max_capacity  # Use >= to be safe
+
+    def pop_oldest(self):
+        if self.memory:
+            msg = self.memory.popleft()
+            print("ShortTermMemory: Evicted oldest QA pair.")
+            self.save()
+            return msg
+        return None
+
+    def save(self):
+        try:
+            with open(self.file_path, "w", encoding="utf-8") as f:
+                json.dump(list(self.memory), f, ensure_ascii=False, indent=2)
+        except IOError as e:
+            print(f"Error saving ShortTermMemory to {self.file_path}: {e}")
+
+    def load(self):
+        try:
+            with open(self.file_path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+                # Ensure items are loaded correctly, especially if file was empty or malformed
+                if isinstance(data, list):
+                    self.memory = deque(data, maxlen=self.max_capacity)
+                else:
+                    self.memory = deque(maxlen=self.max_capacity)
+            print(f"ShortTermMemory: Loaded from {self.file_path}.")
+        except FileNotFoundError:
+            self.memory = deque(maxlen=self.max_capacity)
+            print(f"ShortTermMemory: No history file found at {self.file_path}. Initializing new memory.")
+        except json.JSONDecodeError:
+            self.memory = deque(maxlen=self.max_capacity)
+            print(f"ShortTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.")
+        except Exception as e:
+            self.memory = deque(maxlen=self.max_capacity)
+            print(f"ShortTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. 
Initializing new memory.") \ No newline at end of file diff --git a/memoryos-mcp/memoryos/test.py b/memoryos-mcp/memoryos/test.py new file mode 100644 index 0000000..23de687 --- /dev/null +++ b/memoryos-mcp/memoryos/test.py @@ -0,0 +1,55 @@ + +import os +from memoryos import Memoryos + +# --- Basic Configuration --- +USER_ID = "demo_user" +ASSISTANT_ID = "demo_assistant" +API_KEY = "" # Replace with your key +BASE_URL = "" # Optional: if using a custom OpenAI endpoint +DATA_STORAGE_PATH = "" +LLM_MODEL = "gpt-4o-mini" + +def simple_demo(): + print("MemoryOS Simple Demo") + + # 1. Initialize MemoryOS + print("Initializing MemoryOS...") + try: + memo = Memoryos( + user_id=USER_ID, + openai_api_key=API_KEY, + openai_base_url=BASE_URL, + data_storage_path=DATA_STORAGE_PATH, + llm_model=LLM_MODEL, + assistant_id=ASSISTANT_ID, + short_term_capacity=7, + mid_term_heat_threshold=5, + retrieval_queue_capacity=10, + long_term_knowledge_capacity=100, + mid_term_similarity_threshold=0.6 + ) + print("MemoryOS initialized successfully!\n") + except Exception as e: + print(f"Error: {e}") + return + + # 2. Add some basic memories + print("Adding some memories...") + + memo.add_memory( + user_input="Hi! I'm Tom, I work as a data scientist in San Francisco.", + agent_response="Hello Tom! Nice to meet you. Data science is such an exciting field. What kind of data do you work with?" + ) + + test_query = "What do you remember about my job?" + print(f"User: {test_query}") + + response = memo.get_response( + query=test_query, + ) + + print(f"Assistant: {response}") + +if __name__ == "__main__": + simple_demo() \ No newline at end of file diff --git a/memoryos-mcp/memoryos/updater.py b/memoryos-mcp/memoryos/updater.py new file mode 100644 index 0000000..33de39b --- /dev/null +++ b/memoryos-mcp/memoryos/updater.py @@ -0,0 +1,239 @@ +try: + from .utils import ( + generate_id, get_timestamp, + gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient, + run_parallel_tasks + ) + from .short_term import ShortTermMemory + from .mid_term import MidTermMemory + from .long_term import LongTermMemory +except ImportError: + from utils import ( + generate_id, get_timestamp, + gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient, + run_parallel_tasks + ) + from short_term import ShortTermMemory + from mid_term import MidTermMemory + from long_term import LongTermMemory + +from concurrent.futures import ThreadPoolExecutor, as_completed + +class Updater: + def __init__(self, + short_term_memory: ShortTermMemory, + mid_term_memory: MidTermMemory, + long_term_memory: LongTermMemory, + client: OpenAIClient, + topic_similarity_threshold=0.5, + llm_model="gpt-4o-mini"): + self.short_term_memory = short_term_memory + self.mid_term_memory = mid_term_memory + self.long_term_memory = long_term_memory + self.client = client + self.topic_similarity_threshold = topic_similarity_threshold + self.last_evicted_page_for_continuity = None # Tracks the actual last page object for continuity checks + self.llm_model = llm_model + + def _process_page_embedding_and_keywords(self, page_data): + """处理单个页面的embedding生成(关键词由multi-summary提供)""" + page_id = page_data.get("page_id", generate_id("page")) + + # 检查是否已有embedding + if "page_embedding" in page_data and page_data["page_embedding"]: + print(f"Updater: Page {page_id} already has embedding, skipping computation") + return page_data + + # 只处理embedding,关键词由multi-summary统一提供 + if not ("page_embedding" in page_data and 
page_data["page_embedding"]): + full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" + try: + embedding = self._get_embedding_for_page(full_text) + if embedding is not None: + from .utils import normalize_vector + page_data["page_embedding"] = normalize_vector(embedding).tolist() + print(f"Updater: Generated embedding for page {page_id}") + except Exception as e: + print(f"Error generating embedding for page {page_id}: {e}") + + # 设置空的关键词列表(将由multi-summary的关键词填充) + if "page_keywords" not in page_data: + page_data["page_keywords"] = [] + + return page_data + + def _get_embedding_for_page(self, text): + """获取页面embedding的辅助方法""" + from .utils import get_embedding + return get_embedding(text) + + def _update_linked_pages_meta_info(self, start_page_id, new_meta_info): + """ + Updates meta_info for a chain of connected pages starting from start_page_id. + This is a simplified version. Assumes that once a chain is broken (no pre_page), + we don't need to go further back. Updates forward as well. + """ + # Go backward + q = [start_page_id] + visited = {start_page_id} + + head = 0 + while head < len(q): + current_page_id = q[head] + head += 1 + page = self.mid_term_memory.get_page_by_id(current_page_id) + if page: + page["meta_info"] = new_meta_info + # Check previous page + prev_id = page.get("pre_page") + if prev_id and prev_id not in visited: + q.append(prev_id) + visited.add(prev_id) + # Check next page + next_id = page.get("next_page") + if next_id and next_id not in visited: + q.append(next_id) + visited.add(next_id) + if q: # If any pages were updated + self.mid_term_memory.save() # Save mid-term memory after updates + + def process_short_term_to_mid_term(self): + evicted_qas = [] + while self.short_term_memory.is_full(): + qa = self.short_term_memory.pop_oldest() + if qa and qa.get("user_input") and qa.get("agent_response"): + evicted_qas.append(qa) + + if not evicted_qas: + print("Updater: No QAs evicted from short-term memory.") + return + + print(f"Updater: Processing {len(evicted_qas)} QAs from short-term to mid-term.") + + # 1. Create page structures and handle continuity within the evicted batch + current_batch_pages = [] + temp_last_page_in_batch = self.last_evicted_page_for_continuity # Carry over from previous batch if any + + for qa_pair in evicted_qas: + current_page_obj = { + "page_id": generate_id("page"), + "user_input": qa_pair.get("user_input", ""), + "agent_response": qa_pair.get("agent_response", ""), + "timestamp": qa_pair.get("timestamp", get_timestamp()), + "preloaded": False, # Default for new pages from short-term + "analyzed": False, # Default for new pages from short-term + "pre_page": None, + "next_page": None, + "meta_info": None + } + + is_continuous = check_conversation_continuity(temp_last_page_in_batch, current_page_obj, self.client, model=self.llm_model) + + if is_continuous and temp_last_page_in_batch: + current_page_obj["pre_page"] = temp_last_page_in_batch["page_id"] + # The actual next_page for temp_last_page_in_batch will be set when it's stored in mid-term + # or if it's already there, it needs an update. This linking is tricky. + # For now, we establish the link from current to previous. + # MidTermMemory's update_page_connections can fix the other side if pages are already there. 
+ + # Meta info generation based on continuity + last_meta = temp_last_page_in_batch.get("meta_info") + new_meta = generate_page_meta_info(last_meta, current_page_obj, self.client, model=self.llm_model) + current_page_obj["meta_info"] = new_meta + # If temp_last_page_in_batch was part of a chain, its meta_info and subsequent ones should update. + # This implies that meta_info should perhaps be updated more globally or propagated. + # For now, new_meta applies to current_page_obj and potentially its chain. + # We can call _update_linked_pages_meta_info if temp_last_page_in_batch is in mid-term already. + if temp_last_page_in_batch.get("page_id") and self.mid_term_memory.get_page_by_id(temp_last_page_in_batch["page_id"]): + self._update_linked_pages_meta_info(temp_last_page_in_batch["page_id"], new_meta) + else: + # Start of a new chain or no previous page + current_page_obj["meta_info"] = generate_page_meta_info(None, current_page_obj, self.client, model=self.llm_model) + + current_batch_pages.append(current_page_obj) + temp_last_page_in_batch = current_page_obj # Update for the next iteration in this batch + + # Update the global last evicted page for the next run of this method + if current_batch_pages: + self.last_evicted_page_for_continuity = current_batch_pages[-1] + + # 2. Consolidate text from current_batch_pages for multi-summary + if not current_batch_pages: + return + + input_text_for_summary = "\n".join([ + f"User: {p.get('user_input','')}\nAssistant: {p.get('agent_response','')}" + for p in current_batch_pages + ]) + + print("Updater: Generating multi-topic summary for the evicted batch...") + multi_summary_result = gpt_generate_multi_summary(input_text_for_summary, self.client, model=self.llm_model) + + # 3. Insert pages into MidTermMemory based on summaries + if multi_summary_result and multi_summary_result.get("summaries"): + for summary_item in multi_summary_result["summaries"]: + theme_summary = summary_item.get("content", "General summary of recent interactions.") + theme_keywords = summary_item.get("keywords", []) + print(f"Updater: Processing theme '{summary_item.get('theme')}' for mid-term insertion.") + + # Pass the already processed pages (with IDs, embeddings to be added by MidTermMemory if not present) + self.mid_term_memory.insert_pages_into_session( + summary_for_new_pages=theme_summary, + keywords_for_new_pages=theme_keywords, + pages_to_insert=current_batch_pages, # These pages now have pre_page, next_page, meta_info set up + similarity_threshold=self.topic_similarity_threshold + ) + else: + # Fallback: if no summaries, add as one session or handle as a single block + print("Updater: No specific themes from multi-summary. Adding batch as a general session.") + fallback_summary = "General conversation segment from short-term memory." + fallback_keywords = [] # Use empty keywords since multi-summary failed + self.mid_term_memory.insert_pages_into_session( + summary_for_new_pages=fallback_summary, + keywords_for_new_pages=fallback_keywords, + pages_to_insert=current_batch_pages, + similarity_threshold=self.topic_similarity_threshold + ) + + # After pages are in mid-term, ensure their connections are doubly linked if needed. + # MidTermMemory.insert_pages_into_session should ideally handle this internally + # or we might need a separate pass to solidify connections after all insertions. 
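+        # Illustration with hypothetical IDs: if page_a precedes page_b, the loop below calls
+        #   update_page_connections("page_a_id", "page_b_id")
+        # so that both stored copies agree on their pre_page/next_page links.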
+        for page in current_batch_pages:
+            if page.get("pre_page"):
+                self.mid_term_memory.update_page_connections(page["pre_page"], page["page_id"])
+            if page.get("next_page"):
+                self.mid_term_memory.update_page_connections(page["page_id"], page["next_page"]) # This seems redundant if next is set by prior
+        if current_batch_pages: # Save if any pages were processed
+            self.mid_term_memory.save()
+
+    def update_long_term_from_analysis(self, user_id, profile_analysis_result):
+        """
+        Updates long-term memory based on the results of a personality/knowledge analysis.
+        profile_analysis_result is expected to be a dict with keys like "profile", "private", "assistant_knowledge".
+        """
+        if not profile_analysis_result:
+            print("Updater: No analysis result provided for long-term update.")
+            return
+
+        new_profile_text = profile_analysis_result.get("profile")
+        if new_profile_text and new_profile_text.lower() != "none":
+            print(f"Updater: Updating user profile for {user_id} in LongTermMemory.")
+            # Use the new analysis result directly as the complete profile, since it is already the integrated result
+            self.long_term_memory.update_user_profile(user_id, new_profile_text, merge=False)
+
+        user_private_knowledge = profile_analysis_result.get("private")
+        if user_private_knowledge and user_private_knowledge.lower() != "none":
+            print(f"Updater: Adding user private knowledge for {user_id} to LongTermMemory.")
+            # Split if multiple lines, assuming each line is a distinct piece of knowledge
+            for line in user_private_knowledge.split('\n'):
+                if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
+                    self.long_term_memory.add_user_knowledge(line.strip())
+
+        assistant_knowledge_text = profile_analysis_result.get("assistant_knowledge")
+        if assistant_knowledge_text and assistant_knowledge_text.lower() != "none":
+            print("Updater: Adding assistant knowledge to LongTermMemory.")
+            for line in assistant_knowledge_text.split('\n'):
+                if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]:
+                    self.long_term_memory.add_assistant_knowledge(line.strip())
+
+        # LongTermMemory.save() is called by its add/update methods
\ No newline at end of file
diff --git a/memoryos-mcp/memoryos/utils.py b/memoryos-mcp/memoryos/utils.py
new file mode 100644
index 0000000..6983a44
--- /dev/null
+++ b/memoryos-mcp/memoryos/utils.py
@@ -0,0 +1,393 @@
+import time
+import uuid
+import openai
+import numpy as np
+from sentence_transformers import SentenceTransformer
+import json
+import os
+import inspect
+from functools import wraps
+try:
+    from . import prompts  # try the relative import first
+except ImportError:
+    import prompts  # fall back to an absolute import
+from openai import OpenAI
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import threading
+
+def clean_reasoning_model_output(text):
+    """
+    Strip <think> tags from reasoning-model output.
+    Adapts to the output format of reasoning models (e.g., the o1 series).
+    """
+    if not text:
+        return text
+
+    import re
+    # Remove <think>...</think> tags together with their content
+    cleaned_text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
+    # Collapse the extra blank lines this may leave behind
+    cleaned_text = re.sub(r'\n\s*\n\s*\n', '\n\n', cleaned_text)
+    # Trim leading and trailing whitespace
+    cleaned_text = cleaned_text.strip()
+
+    return cleaned_text
+
+# ---- OpenAI Client ----
+class OpenAIClient:
+    def __init__(self, api_key, base_url=None, max_workers=5):
+        self.api_key = api_key
+        self.base_url = base_url if base_url else "https://api.openai.com/v1"
+        # The openai library looks for OPENAI_API_KEY and OPENAI_BASE_URL env vars by default
+        # or they can be passed directly to the client.
+        # For simplicity and explicit control, we'll pass them to the client constructor.
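+        # Minimal usage sketch (hypothetical key and messages, not part of this patch):
+        #   client = OpenAIClient(api_key="sk-...", max_workers=4)
+        #   reply = client.chat_completion(model="gpt-4o-mini",
+        #                                  messages=[{"role": "user", "content": "Hello"}])
+        #   client.shutdown()  # frees the worker threads when done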
+        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        self.executor = ThreadPoolExecutor(max_workers=max_workers)
+        self._lock = threading.Lock()
+
+    def chat_completion(self, model, messages, temperature=0.7, max_tokens=2000):
+        print(f"Calling OpenAI API. Model: {model}")
+        try:
+            response = self.client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            raw_content = response.choices[0].message.content.strip()
+            # Automatically strip the <think> tags emitted by reasoning models
+            cleaned_content = clean_reasoning_model_output(raw_content)
+            return cleaned_content
+        except Exception as e:
+            print(f"Error calling OpenAI API: {e}")
+            # Fallback or error handling
+            return "Error: Could not get response from LLM."
+
+    def chat_completion_async(self, model, messages, temperature=0.7, max_tokens=2000):
+        """Asynchronous version of chat_completion; returns a Future."""
+        return self.executor.submit(self.chat_completion, model, messages, temperature, max_tokens)
+
+    def batch_chat_completion(self, requests):
+        """
+        Process multiple LLM requests in parallel.
+        requests: List of dict with keys: model, messages, temperature, max_tokens
+        NOTE: as_completed yields futures in completion order, so the returned results
+        are not guaranteed to line up with the order of `requests`.
+        """
+        futures = []
+        for req in requests:
+            future = self.chat_completion_async(
+                model=req.get("model", "gpt-4o-mini"),
+                messages=req["messages"],
+                temperature=req.get("temperature", 0.7),
+                max_tokens=req.get("max_tokens", 2000)
+            )
+            futures.append(future)
+
+        results = []
+        for future in as_completed(futures):
+            try:
+                result = future.result()
+                results.append(result)
+            except Exception as e:
+                print(f"Error in batch completion: {e}")
+                results.append("Error: Could not get response from LLM.")
+
+        return results
+
+    def shutdown(self):
+        """Shut down the worker thread pool."""
+        self.executor.shutdown(wait=True)
+
+# ---- Parallel Processing Utilities ----
+def run_parallel_tasks(tasks, max_workers=3):
+    """
+    Run a list of tasks in parallel.
+    tasks: List of callable functions
+    """
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(task) for task in tasks]
+        results = []
+        for future in as_completed(futures):
+            try:
+                result = future.result()
+                results.append(result)
+            except Exception as e:
+                print(f"Error in parallel task: {e}")
+                results.append(None)
+    return results
+
+# ---- Basic Utilities ----
+def get_timestamp():
+    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
+def generate_id(prefix="id"):
+    return f"{prefix}_{uuid.uuid4().hex[:8]}"
+
+def ensure_directory_exists(path):
+    os.makedirs(os.path.dirname(path), exist_ok=True)
+
+# ---- Embedding Utilities ----
+_model_cache = {}
+_embedding_cache = {}  # in-memory embedding cache
+
+def _get_valid_kwargs(func, kwargs):
+    """Helper to filter kwargs for a given function's signature."""
+    try:
+        sig = inspect.signature(func)
+        param_keys = set(sig.parameters.keys())
+        return {k: v for k, v in kwargs.items() if k in param_keys}
+    except (ValueError, TypeError):
+        # Fallback for functions/methods where signature inspection is not straightforward
+        return kwargs
+
+def get_embedding(text, model_name="all-MiniLM-L6-v2", use_cache=True, **kwargs):
+    """
+    Get the embedding vector for a piece of text.
+    Supports several mainstream models and adapts automatically to each library's calling convention.
+    - SentenceTransformer models: e.g., 'all-MiniLM-L6-v2', 'Qwen/Qwen3-Embedding-0.6B'
+    - FlagEmbedding models: e.g., 'BAAI/bge-m3'
+
+    :param text: Input text.
+    :param model_name: Model name on Hugging Face.
+    :param use_cache: Whether to use the in-memory cache.
+    :param kwargs: Extra arguments passed to the model constructor or encode method.
+        - for Qwen: `model_kwargs`, `tokenizer_kwargs`, `prompt_name="query"`
+        - for BGE-M3: `use_fp16=True`, `max_length=8192`
+    :return: The embedding vector of the text (numpy array).
+    """
+    model_config_key = json.dumps({"model_name": model_name, **kwargs}, sort_keys=True)
+
+    if use_cache:
+        cache_key = f"{model_config_key}::{hash(text)}"
+        if cache_key in _embedding_cache:
+            return _embedding_cache[cache_key]
+
+    # --- Model Loading ---
+    model_init_key = json.dumps({"model_name": model_name, **{k: v for k, v in kwargs.items() if k not in ['batch_size', 'max_length']}}, sort_keys=True)
+    if model_init_key not in _model_cache:
+        print(f"Loading model: {model_name}...")
+        if 'bge-m3' in model_name.lower():
+            try:
+                from FlagEmbedding import BGEM3FlagModel
+                init_kwargs = _get_valid_kwargs(BGEM3FlagModel.__init__, kwargs)
+                print(f"-> Using BGEM3FlagModel with init kwargs: {init_kwargs}")
+                _model_cache[model_init_key] = BGEM3FlagModel(model_name, **init_kwargs)
+            except ImportError:
+                raise ImportError("Please install FlagEmbedding: 'pip install -U FlagEmbedding' to use bge-m3 model.")
+        else:  # Default handler for SentenceTransformer-based models (like Qwen, all-MiniLM, etc.)
+            try:
+                from sentence_transformers import SentenceTransformer
+                init_kwargs = _get_valid_kwargs(SentenceTransformer.__init__, kwargs)
+                print(f"-> Using SentenceTransformer with init kwargs: {init_kwargs}")
+                _model_cache[model_init_key] = SentenceTransformer(model_name, **init_kwargs)
+            except ImportError:
+                raise ImportError("Please install sentence-transformers: 'pip install -U sentence-transformers' to use this model.")
+
+    model = _model_cache[model_init_key]
+
+    # --- Encoding ---
+    embedding = None
+    if 'bge-m3' in model_name.lower():
+        encode_kwargs = _get_valid_kwargs(model.encode, kwargs)
+        print(f"-> Encoding with BGEM3FlagModel using kwargs: {encode_kwargs}")
+        result = model.encode([text], **encode_kwargs)
+        embedding = result['dense_vecs'][0]
+    else:  # Default to SentenceTransformer-based models
+        encode_kwargs = _get_valid_kwargs(model.encode, kwargs)
+        print(f"-> Encoding with SentenceTransformer using kwargs: {encode_kwargs}")
+        embedding = model.encode([text], **encode_kwargs)[0]
+
+    if use_cache:
+        cache_key = f"{model_config_key}::{hash(text)}"
+        _embedding_cache[cache_key] = embedding
+        if len(_embedding_cache) > 10000:
+            keys_to_remove = list(_embedding_cache.keys())[:1000]
+            for key in keys_to_remove:
+                try:
+                    del _embedding_cache[key]
+                except KeyError:
+                    pass
+            print("Cleaned embedding cache to prevent memory overflow")
+
+    return embedding
+
+
+def clear_embedding_cache():
+    """Clear the in-memory embedding cache."""
+    global _embedding_cache
+    _embedding_cache.clear()
+    print("Embedding cache cleared")
+
+def normalize_vector(vec):
+    vec = np.array(vec, dtype=np.float32)
+    norm = np.linalg.norm(vec)
+    if norm == 0:
+        return vec
+    return vec / norm
+
+# ---- Time Decay Function ----
+def compute_time_decay(event_timestamp_str, current_timestamp_str, tau_hours=24):
+    from datetime import datetime
+    fmt = "%Y-%m-%d %H:%M:%S"
+    try:
+        t_event = datetime.strptime(event_timestamp_str, fmt)
+        t_current = datetime.strptime(current_timestamp_str, fmt)
+        delta_hours = (t_current - t_event).total_seconds() / 3600.0
+        return np.exp(-delta_hours / tau_hours)
+    except ValueError:  # Handle cases where timestamp might be invalid
+        return 0.1  # Default low recency
+
+
+# ---- LLM-based Utility Functions ----
+
+def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
+    dialog_text = "\n".join([f"User: {d.get('user_input','')} Assistant: {d.get('agent_response','')}" for d in dialogs])
+    messages = [
+        {"role": "system", "content": prompts.SUMMARIZE_DIALOGS_SYSTEM_PROMPT},
+        {"role": "user", "content": 
prompts.SUMMARIZE_DIALOGS_USER_PROMPT.format(dialog_text=dialog_text)} + ] + print("Calling LLM to generate topic summary...") + return client.chat_completion(model=model, messages=messages) + +def gpt_generate_multi_summary(text, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.MULTI_SUMMARY_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.MULTI_SUMMARY_USER_PROMPT.format(text=text)} + ] + print("Calling LLM to generate multi-topic summary...") + response_text = client.chat_completion(model=model, messages=messages) + try: + summaries = json.loads(response_text) + except json.JSONDecodeError: + print(f"Warning: Could not parse multi-summary JSON: {response_text}") + summaries = [] # Return empty list or a default structure + return {"input": text, "summaries": summaries} + + +def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", existing_user_profile="None"): + """ + Analyze and update user personality profile from dialogs + 结合现有画像和新对话,直接输出更新后的完整画像 + """ + conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs]) + messages = [ + {"role": "system", "content": prompts.PERSONALITY_ANALYSIS_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.PERSONALITY_ANALYSIS_USER_PROMPT.format( + conversation=conversation, + existing_user_profile=existing_user_profile + )} + ] + print("Calling LLM for user profile analysis and update...") + result_text = client.chat_completion(model=model, messages=messages) + return result_text.strip() if result_text else "None" + + +def gpt_knowledge_extraction(dialogs, client: OpenAIClient, model="gpt-4o-mini"): + """Extract user private data and assistant knowledge from dialogs""" + conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs]) + messages = [ + {"role": "system", "content": prompts.KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.KNOWLEDGE_EXTRACTION_USER_PROMPT.format( + conversation=conversation + )} + ] + print("Calling LLM for knowledge extraction...") + result_text = client.chat_completion(model=model, messages=messages) + + private_data = "None" + assistant_knowledge = "None" + + try: + if "【User Private Data】" in result_text: + private_data_start = result_text.find("【User Private Data】") + len("【User Private Data】") + if "【Assistant Knowledge】" in result_text: + private_data_end = result_text.find("【Assistant Knowledge】") + private_data = result_text[private_data_start:private_data_end].strip() + + assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】") + assistant_knowledge = result_text[assistant_knowledge_start:].strip() + else: + private_data = result_text[private_data_start:].strip() + elif "【Assistant Knowledge】" in result_text: + assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】") + assistant_knowledge = result_text[assistant_knowledge_start:].strip() + + except Exception as e: + print(f"Error parsing knowledge extraction: {e}. 
Raw result: {result_text}") + + return { + "private": private_data if private_data else "None", + "assistant_knowledge": assistant_knowledge if assistant_knowledge else "None" + } + + +# Keep the old function for backward compatibility, but mark as deprecated +def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", known_user_traits="None"): + """ + DEPRECATED: Use gpt_user_profile_analysis and gpt_knowledge_extraction instead. + This function is kept for backward compatibility only. + """ + # Call the new functions + profile = gpt_user_profile_analysis(dialogs, client, model, known_user_traits) + knowledge_data = gpt_knowledge_extraction(dialogs, client, model) + + return { + "profile": profile, + "private": knowledge_data["private"], + "assistant_knowledge": knowledge_data["assistant_knowledge"] + } + + +def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.UPDATE_PROFILE_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.UPDATE_PROFILE_USER_PROMPT.format(old_profile=old_profile, new_analysis=new_analysis)} + ] + print("Calling LLM to update user profile...") + return client.chat_completion(model=model, messages=messages) + +def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.EXTRACT_THEME_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.EXTRACT_THEME_USER_PROMPT.format(answer_text=answer_text)} + ] + print("Calling LLM to extract theme...") + return client.chat_completion(model=model, messages=messages) + +def llm_extract_keywords(text, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.EXTRACT_KEYWORDS_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.EXTRACT_KEYWORDS_USER_PROMPT.format(text=text)} + ] + print("Calling LLM to extract keywords...") + response = client.chat_completion(model=model, messages=messages) + return [kw.strip() for kw in response.split(',') if kw.strip()] + +# ---- Functions from dynamic_update.py (to be used by Updater class) ---- +def check_conversation_continuity(previous_page, current_page, client: OpenAIClient, model="gpt-4o-mini"): + prev_user = previous_page.get("user_input", "") if previous_page else "" + prev_agent = previous_page.get("agent_response", "") if previous_page else "" + + user_prompt = prompts.CONTINUITY_CHECK_USER_PROMPT.format( + prev_user=prev_user, + prev_agent=prev_agent, + curr_user=current_page.get("user_input", ""), + curr_agent=current_page.get("agent_response", "") + ) + messages = [ + {"role": "system", "content": prompts.CONTINUITY_CHECK_SYSTEM_PROMPT}, + {"role": "user", "content": user_prompt} + ] + response = client.chat_completion(model=model, messages=messages, temperature=0.0, max_tokens=10) + return response.strip().lower() == "true" + +def generate_page_meta_info(last_page_meta, current_page, client: OpenAIClient, model="gpt-4o-mini"): + current_conversation = f"User: {current_page.get('user_input', '')}\nAssistant: {current_page.get('agent_response', '')}" + user_prompt = prompts.META_INFO_USER_PROMPT.format( + last_meta=last_page_meta if last_page_meta else "None", + new_dialogue=current_conversation + ) + messages = [ + {"role": "system", "content": prompts.META_INFO_SYSTEM_PROMPT}, + {"role": "user", "content": user_prompt} + ] + return client.chat_completion(model=model, messages=messages, temperature=0.3, max_tokens=100).strip() \ No newline at end of file 
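A minimal sketch of how the helpers in utils.py compose (timestamps and text below are
illustrative, not part of the patch itself):

    from utils import compute_time_decay, get_embedding, normalize_vector

    # A memory event that is 24 hours old decays to e^-1 (about 0.368)
    # under the default tau_hours=24.
    decay = compute_time_decay("2025-07-12 11:00:00", "2025-07-13 11:00:00")

    # get_embedding() caches per (model config, text); normalize before indexing,
    # as the memory layers do when storing page and knowledge embeddings.
    vec = normalize_vector(get_embedding("hello world"))  # unit-L2 numpy vector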
diff --git a/memoryos-mcp/requirements.txt b/memoryos-mcp/requirements.txt
new file mode 100644
index 0000000..40f8d3f
--- /dev/null
+++ b/memoryos-mcp/requirements.txt
@@ -0,0 +1,19 @@
+
+mcp
+
+openai>=1.0.0
+
+numpy==1.24.*
+
+sentence-transformers>=2.2.0
+
+faiss-gpu>=1.7.0
+
+# Date and time handling
+python-dateutil>=2.8.0
+
+typing-extensions>=4.0.0
+
+# Optional: if no GPU is available, install the CPU build manually instead
+# pip uninstall faiss-gpu
+# pip install faiss-cpu>=1.7.0
\ No newline at end of file
diff --git a/memoryos-mcp/server_new.py b/memoryos-mcp/server_new.py
new file mode 100644
index 0000000..3a7e242
--- /dev/null
+++ b/memoryos-mcp/server_new.py
@@ -0,0 +1,292 @@
+
+import sys
+import os
+import json
+import argparse
+from typing import Any, Dict, Optional, List
+# Make sure the memoryos directory is on sys.path so its modules can be imported
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'memoryos'))
+
+try:
+    from mcp.server.fastmcp import FastMCP
+except ImportError as e:
+    print(f"ERROR: Failed to import FastMCP. Exception: {e}", file=sys.stderr)
+    print("Please install the latest version of MCP: pip install --upgrade mcp", file=sys.stderr)
+    sys.exit(1)
+
+try:
+    from memoryos import Memoryos
+    from utils import get_timestamp
+except ImportError as e:
+    print(f"Failed to import MemoryOS modules: {e}", file=sys.stderr)
+    print("Please make sure the project structure is correct; the memoryos directory should contain all required files", file=sys.stderr)
+    sys.exit(1)
+
+# MemoryOS instance - set during initialization
+memoryos_instance: Optional[Memoryos] = None
+
+def init_memoryos(config_path: str) -> Memoryos:
+    """Initialize the MemoryOS instance from a JSON config file."""
+    if not os.path.exists(config_path):
+        raise FileNotFoundError(f"Config file not found: {config_path}")
+
+    with open(config_path, 'r', encoding='utf-8') as f:
+        config = json.load(f)
+
+    required_fields = ['user_id', 'openai_api_key', 'data_storage_path']
+    for field in required_fields:
+        if field not in config:
+            raise ValueError(f"Config file is missing required field: {field}")
+
+    return Memoryos(
+        user_id=config['user_id'],
+        openai_api_key=config['openai_api_key'],
+        data_storage_path=config['data_storage_path'],
+        openai_base_url=config.get('openai_base_url'),
+        assistant_id=config.get('assistant_id', 'default_assistant_profile'),
+        short_term_capacity=config.get('short_term_capacity', 10),
+        mid_term_capacity=config.get('mid_term_capacity', 2000),
+        long_term_knowledge_capacity=config.get('long_term_knowledge_capacity', 100),
+        retrieval_queue_capacity=config.get('retrieval_queue_capacity', 7),
+        mid_term_heat_threshold=config.get('mid_term_heat_threshold', 5.0),
+        llm_model=config.get('llm_model', 'gpt-4o-mini'),
+        embedding_model_name=config.get('embedding_model_name', 'all-MiniLM-L6-v2')
+    )
+
+# Create the FastMCP server instance
+mcp = FastMCP("MemoryOS")
+
+@mcp.tool()
+def add_memory(user_input: str, agent_response: str, timestamp: Optional[str] = None, meta_data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+    """
+    Add a new memory (a user input / assistant response pair) to the MemoryOS system.
+
+    Args:
+        user_input: The user's input or question
+        agent_response: The assistant's response
+        timestamp: Timestamp (optional, format: YYYY-MM-DD HH:MM:SS)
+        meta_data: Optional metadata (JSON object)
+
+    Returns:
+        A dict describing the result of the operation
+    """
+    global memoryos_instance
+
+    if memoryos_instance is None:
+        return {
+            "status": "error",
+            "message": "MemoryOS is not initialized. Please check the configuration file."
+        }
+
+    try:
+        if not user_input or not agent_response:
+            return {
+                "status": "error",
+                "message": "user_input and agent_response are required"
+            }
+
+        memoryos_instance.add_memory(
+            user_input=user_input,
+            agent_response=agent_response,
+            timestamp=timestamp or get_timestamp(),
+            meta_data=meta_data or {}
+        )
+
+        result = {
+            "status": "success",
+            "message": "Memory has been successfully added to MemoryOS",
+            "timestamp": timestamp or get_timestamp(),
+            "details": {
+                "user_input_length": len(user_input),
+                "agent_response_length": len(agent_response),
+                "has_meta_data": meta_data is not None
+            }
+        }
+
+        return result
+
+    except Exception as e:
+        return {
+            "status": "error",
+            "message": f"Error adding memory: {str(e)}"
+        }
+
+@mcp.tool()
+def retrieve_memory(query: str, relationship_with_user: str = "friend", style_hint: str = "", max_results: int = 10) -> Dict[str, Any]:
+    """
+    Retrieve related memories and context from MemoryOS for a query, covering short-term memory, mid-term memory, and long-term knowledge.
+
+    Args:
+        query: The retrieval query describing the information to look for
+        relationship_with_user: Relationship with the user (e.g., friend, assistant, colleague)
+        style_hint: Response style hint
+        max_results: Maximum number of results to return
+
+    Returns:
+        A dict containing the retrieval results, including:
+        - short_term_memory: all QA pairs currently in short-term memory
+        - retrieved_pages: relevant pages retrieved from mid-term memory
+        - retrieved_user_knowledge: relevant entries from the user's long-term knowledge base
+        - retrieved_assistant_knowledge: relevant entries from the assistant's knowledge base
+    """
+    global memoryos_instance
+
+    if memoryos_instance is None:
+        return {
+            "status": "error",
+            "message": "MemoryOS is not initialized. Please check the configuration file."
+        }
+
+    try:
+        if not query:
+            return {
+                "status": "error",
+                "message": "query parameter is required"
+            }
+
+        # NOTE: relationship_with_user and style_hint are accepted for API compatibility
+        # but are not used by the retrieval pipeline yet.
+        # Use the retriever to gather relevant context
+        retrieval_results = memoryos_instance.retriever.retrieve_context(
+            user_query=query,
+            user_id=memoryos_instance.user_id
+        )
+
+        # Fetch the current short-term memory
+        short_term_history = memoryos_instance.short_term_memory.get_all()
+
+        # Fetch the user profile
+        user_profile = memoryos_instance.get_user_profile_summary()
+
+        # Assemble the result
+        result = {
+            "status": "success",
+            "query": query,
+            "timestamp": get_timestamp(),
+            "user_profile": user_profile if user_profile and user_profile.lower() != "none" else "No detailed user profile",
+            "short_term_memory": short_term_history,
+            "short_term_count": len(short_term_history),
+            "retrieved_pages": [{
+                'user_input': page['user_input'],
+                'agent_response': page['agent_response'],
+                'timestamp': page['timestamp'],
+                'meta_info': page['meta_info']
+            } for page in retrieval_results["retrieved_pages"][:max_results]],
+
+            "retrieved_user_knowledge": [{
+                'knowledge': k['knowledge'],
+                'timestamp': k['timestamp']
+            } for k in retrieval_results["retrieved_user_knowledge"][:max_results]],
+
+            "retrieved_assistant_knowledge": [{
+                'knowledge': k['knowledge'],
+                'timestamp': k['timestamp']
+            } for k in retrieval_results["retrieved_assistant_knowledge"][:max_results]],
+
+            # Totals for each memory tier
+            "total_pages_found": len(retrieval_results["retrieved_pages"]),
+            "total_user_knowledge_found": len(retrieval_results["retrieved_user_knowledge"]),
+            "total_assistant_knowledge_found": len(retrieval_results["retrieved_assistant_knowledge"])
+        }
+
+        return result
+
+    except Exception as e:
+        return {
+            "status": "error",
+            "message": f"Error retrieving memory: {str(e)}"
+        }
+
+@mcp.tool()
+def get_user_profile(include_knowledge: bool = True, include_assistant_knowledge: bool = False) -> Dict[str, Any]:
+    """
+    Get the user's profile information, including personality traits, preferences, and related knowledge.
+
+    Args:
+        include_knowledge: Whether to include the user's knowledge entries
+        include_assistant_knowledge: Whether to include the assistant knowledge base
+
+    Returns:
+        A dict containing the user profile information
+    """
+    global memoryos_instance
+
+    if memoryos_instance is None:
+        return {
+            "status": "error",
+            "message": "MemoryOS is not initialized. Please check the configuration file."
+        }
+
+    try:
+        # Fetch the user profile
+        user_profile = memoryos_instance.get_user_profile_summary()
+
+        result = {
+            "status": "success",
+            "timestamp": get_timestamp(),
+            "user_id": memoryos_instance.user_id,
+            "assistant_id": memoryos_instance.assistant_id,
+            "user_profile": user_profile if user_profile and user_profile.lower() != "none" else "No detailed user profile"
+        }
+
+        if include_knowledge:
+            user_knowledge = memoryos_instance.user_long_term_memory.get_user_knowledge()
+            result["user_knowledge"] = [
+                {
+                    "knowledge": item["knowledge"],
+                    "timestamp": item["timestamp"]
+                }
+                for item in user_knowledge
+            ]
+            result["user_knowledge_count"] = len(user_knowledge)
+
+        if include_assistant_knowledge:
+            assistant_knowledge = memoryos_instance.get_assistant_knowledge_summary()
+            result["assistant_knowledge"] = [
+                {
+                    "knowledge": item["knowledge"],
+                    "timestamp": item["timestamp"]
+                }
+                for item in assistant_knowledge
+            ]
+            result["assistant_knowledge_count"] = len(assistant_knowledge)
+
+        return result
+
+    except Exception as e:
+        return {
+            "status": "error",
+            "message": f"Error getting user profile: {str(e)}"
+        }
+
+def main():
+    """Main entry point."""
+    parser = argparse.ArgumentParser(description="MemoryOS MCP Server")
+    parser.add_argument(
+        "--config",
+        type=str,
+        default="config.json",
+        help="Path to the config file (default: config.json)"
+    )
+
+    args = parser.parse_args()
+
+    global memoryos_instance
+
+    try:
+        # Initialize MemoryOS
+        memoryos_instance = init_memoryos(args.config)
+        print(f"MemoryOS MCP Server started, user ID: {memoryos_instance.user_id}", file=sys.stderr)
+        print(f"Config file: {args.config}", file=sys.stderr)
+
+        # Start the MCP server over stdio transport
+        mcp.run(transport="stdio")
+
+    except KeyboardInterrupt:
+        print("Server interrupted by user", file=sys.stderr)
+    except Exception as e:
+        print(f"Error while starting the server: {e}", file=sys.stderr)
+        import traceback
+        traceback.print_exc()
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/memoryos-mcp/test_simple.py b/memoryos-mcp/test_simple.py
new file mode 100644
index 0000000..585423a
--- /dev/null
+++ b/memoryos-mcp/test_simple.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python3
+"""
+Simple MemoryOS MCP Server Test
+- Insert 15 conversations
+- Set short-term memory capacity to 2
+- Test 2 specific queries to verify memory retrieval works correctly.
+""" + +import asyncio +import json +import sys +from pathlib import Path + +# Import MCP client +try: + from mcp import ClientSession, StdioServerParameters + from mcp.client.stdio import stdio_client + from mcp import types +except ImportError as e: + print(f"❌ Failed to import MCP client library: {e}") + print("Please install official MCP SDK: pip install mcp") + sys.exit(1) + +class SimpleMemoryOSTest: + """Simple MemoryOS MCP Server Test""" + + def __init__(self, server_script: str = "server_new.py", config_file: str = "config.json"): + self.server_script = Path(server_script) + self.config_file = Path(config_file) + + # Validate file existence + if not self.server_script.exists(): + raise FileNotFoundError(f"Server script not found: {self.server_script}") + if not self.config_file.exists(): + raise FileNotFoundError(f"Config file not found: {self.config_file}") + + def get_server_params(self): + """Get server parameters""" + return StdioServerParameters( + command=sys.executable, + args=[str(self.server_script), "--config", str(self.config_file)], + env=None + ) + + async def test_insert_conversations(self): + """Insert 15 conversations into MemoryOS""" + print("\n💾 Step 1: Insert 15 Conversations") + + # 15 test conversations + conversations = [ + {"user_input": "Hello, I'm Tom from San Francisco", "agent_response": "Hello Tom! Nice to meet you. San Francisco is a great city!"}, + {"user_input": "I work as a software engineer", "agent_response": "That's awesome! Software engineering is a fascinating field. What technologies do you work with?"}, + {"user_input": "I mainly use Python and JavaScript", "agent_response": "Great choice! Python and JavaScript are very popular and powerful languages."}, + {"user_input": "I'm interested in machine learning", "agent_response": "Machine learning is an exciting field! Are you focusing on any particular area?"}, + {"user_input": "I want to learn about neural networks", "agent_response": "Neural networks are the foundation of deep learning. Would you like to start with the basics?"}, + {"user_input": "Yes, please explain backpropagation", "agent_response": "Backpropagation is the key algorithm for training neural networks. It calculates gradients to update weights."}, + {"user_input": "I have a project idea about chatbots", "agent_response": "Chatbots are a great application of ML! What kind of chatbot are you thinking about?"}, + {"user_input": "A customer service chatbot", "agent_response": "Customer service chatbots can be very helpful. You'll need to consider intent recognition and response generation."}, + {"user_input": "What frameworks should I use?", "agent_response": "For chatbots, you could use frameworks like Rasa, Dialogflow, or build with PyTorch/TensorFlow."}, + {"user_input": "I prefer open source solutions", "agent_response": "Great! Rasa is an excellent open-source framework for building conversational AI."}, + {"user_input": "How do I handle multiple languages?", "agent_response": "For multilingual support, you can use translation APIs or train separate models for each language."}, + {"user_input": "I also like hiking in my free time", "agent_response": "Hiking is a wonderful hobby! San Francisco has some great trails nearby like Lands End and Mount Sutro."}, + {"user_input": "Do you know any good hiking spots?", "agent_response": "Yes! You might enjoy Muir Woods, Mount Tamalpais, or the coastal trails in Pacifica."}, + {"user_input": "I'm planning a weekend trip", "agent_response": "That sounds fun! 
Are you thinking of staying local or going somewhere further?"}, + {"user_input": "Maybe somewhere within 2 hours drive", "agent_response": "Perfect! You could visit Napa Valley, Santa Cruz, or even go to Lake Tahoe if you don't mind a slightly longer drive."} + ] + + server_params = self.get_server_params() + + try: + async with stdio_client(server_params) as (read_stream, write_stream): + async with ClientSession(read_stream, write_stream) as session: + await session.initialize() + + success_count = 0 + + for i, conversation in enumerate(conversations, 1): + print(f" Adding conversation {i:2d}/15...") + + result = await session.call_tool("add_memory", conversation) + + if hasattr(result, 'content') and result.content: + content = result.content[0] + if isinstance(content, types.TextContent): + response = json.loads(content.text) + if response.get("status") == "success": + success_count += 1 + print(f" ✅ Conversation {i:2d} added successfully") + else: + print(f" ❌ Conversation {i:2d} failed: {response.get('message', 'Unknown error')}") + else: + print(f" ❌ Conversation {i:2d} failed: Invalid response format") + else: + print(f" ❌ Conversation {i:2d} failed: No response content") + + # Brief delay + await asyncio.sleep(0.1) + + print(f"\n✅ Inserted {success_count}/15 conversations successfully") + return success_count == 15 + + except Exception as e: + print(f"❌ Failed to insert conversations: {e}") + return False + + async def test_memory_retrieval(self): + """Test memory retrieval with 2 specific queries""" + print("\n🔍 Step 2: Test Memory Retrieval") + + # Test queries + test_queries = [ + { + "query": "Tell me about Tom from San Francisco", + "description": "Query about the first conversation - should retrieve Tom's introduction", + "expected_content": ["Tom", "San Francisco", "software engineer"] + }, + { + "query": "What does the user want to learn about machine learning?", + "description": "Query about ML interests - should retrieve neural networks and chatbot discussions", + "expected_content": ["neural networks", "chatbot", "machine learning"] + } + ] + + server_params = self.get_server_params() + + try: + async with stdio_client(server_params) as (read_stream, write_stream): + async with ClientSession(read_stream, write_stream) as session: + await session.initialize() + + for i, test_query in enumerate(test_queries, 1): + print(f"\n--- Query {i}: {test_query['description']} ---") + print(f"Question: {test_query['query']}") + + query_params = { + "query": test_query["query"], + "relationship_with_user": "friend", + "style_hint": "helpful", + "max_results": 10 + } + + result = await session.call_tool("retrieve_memory", query_params) + + if hasattr(result, 'content') and result.content: + content = result.content[0] + if isinstance(content, types.TextContent): + response = json.loads(content.text) + if response.get("status") == "success": + print(f"✅ Query {i} successful!") + + # Display results + pages_found = response.get('total_pages_found', 0) + user_knowledge_found = response.get('total_user_knowledge_found', 0) + assistant_knowledge_found = response.get('total_assistant_knowledge_found', 0) + short_term_count = response.get('short_term_count', 0) + + print(f"📊 Results Summary:") + print(f" - Short-term memory: {short_term_count} items") + print(f" - Mid-term pages: {pages_found} items") + print(f" - User knowledge: {user_knowledge_found} items") + print(f" - Assistant knowledge: {assistant_knowledge_found} items") + + # Show some retrieved content + pages = 
response.get('retrieved_pages', []) + if pages: + print(f"📄 Retrieved Pages ({len(pages)} items):") + for j, page in enumerate(pages[:3], 1): # Show first 3 + user_input = page.get('user_input', '')[:50] + agent_response = page.get('agent_response', '')[:50] + print(f" {j}. User: {user_input}...") + print(f" Agent: {agent_response}...") + + # Check if expected content is found + full_text = json.dumps(response, ensure_ascii=False).lower() + found_expected = [] + for expected in test_query['expected_content']: + if expected.lower() in full_text: + found_expected.append(expected) + + if found_expected: + print(f"✅ Found expected content: {found_expected}") + else: + print(f"⚠️ Expected content not found: {test_query['expected_content']}") + + # Check if first conversation is retrievable + if i == 1: # First query about Tom + if pages_found > 0 or "tom" in full_text: + print("✅ First conversation successfully moved to mid-term memory and is retrievable!") + else: + print("⚠️ First conversation might not be in mid-term memory yet") + + else: + print(f"❌ Query {i} failed: {response.get('message', 'Unknown error')}") + else: + print(f"❌ Query {i} failed: Invalid response format") + else: + print(f"❌ Query {i} failed: No response content") + + await asyncio.sleep(0.5) # Longer delay between queries + + return True + + except Exception as e: + print(f"❌ Memory retrieval test failed: {e}") + return False + + async def run_test(self): + """Run the complete test""" + print("🚀 Starting Simple MemoryOS MCP Server Test") + print(f"Server script: {self.server_script}") + print(f"Config file: {self.config_file}") + print("=" * 60) + + # Step 1: Insert conversations + insert_success = await self.test_insert_conversations() + if not insert_success: + print("❌ Failed to insert conversations. Stopping test.") + return False + + # Wait a bit for processing + print("\n⏳ Waiting 3 seconds for memory processing...") + await asyncio.sleep(3) + + # Step 2: Test retrieval + retrieval_success = await self.test_memory_retrieval() + + # Summary + print("\n" + "=" * 60) + print("📊 Test Summary:") + print(f"✅ Conversation insertion: {'Passed' if insert_success else 'Failed'}") + print(f"✅ Memory retrieval: {'Passed' if retrieval_success else 'Failed'}") + + if insert_success and retrieval_success: + print("🎉 All tests passed! MemoryOS is working correctly.") + print("🔍 Key findings:") + print(" - Short-term memory capacity limit working (should be 2)") + print(" - Mid-term memory storage and retrieval working") + print(" - First conversation successfully retrievable from mid-term memory") + return True + else: + print("⚠️ Some tests failed. 
Please check the system.") + return False + +def main(): + """Main function""" + import argparse + + parser = argparse.ArgumentParser(description="Simple MemoryOS MCP Server Test") + parser.add_argument("--server", default="server_new.py", help="Server script path") + parser.add_argument("--config", default="config.json", help="Config file path") + + args = parser.parse_args() + + try: + tester = SimpleMemoryOSTest(args.server, args.config) + success = asyncio.run(tester.run_test()) + sys.exit(0 if success else 1) + except KeyboardInterrupt: + print("\n⚠️ Test interrupted by user") + sys.exit(1) + except Exception as e: + print(f"\n❌ Test failed with error: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + +if __name__ == "__main__": + main() \ No newline at end of file From 9c45e453dc3c22577c1d85cf6b50f18a8b1400cf Mon Sep 17 00:00:00 2001 From: Kang Jiazheng <108711748+kkkjz@users.noreply.github.com> Date: Sun, 13 Jul 2025 15:49:01 +0800 Subject: [PATCH 4/4] Add files via upload --- memoryos-pypi/__init__.py | 3 + memoryos-pypi/long_term.py | 170 ++++++++++++++ memoryos-pypi/memoryos.py | 362 ++++++++++++++++++++++++++++++ memoryos-pypi/mid_term.py | 391 +++++++++++++++++++++++++++++++++ memoryos-pypi/prompts.py | 233 ++++++++++++++++++++ memoryos-pypi/requirements.txt | 23 ++ memoryos-pypi/retriever.py | 131 +++++++++++ memoryos-pypi/short_term.py | 64 ++++++ memoryos-pypi/test.py | 93 ++++++++ memoryos-pypi/updater.py | 239 ++++++++++++++++++++ memoryos-pypi/utils.py | 386 ++++++++++++++++++++++++++++++++ 11 files changed, 2095 insertions(+) create mode 100644 memoryos-pypi/__init__.py create mode 100644 memoryos-pypi/long_term.py create mode 100644 memoryos-pypi/memoryos.py create mode 100644 memoryos-pypi/mid_term.py create mode 100644 memoryos-pypi/prompts.py create mode 100644 memoryos-pypi/requirements.txt create mode 100644 memoryos-pypi/retriever.py create mode 100644 memoryos-pypi/short_term.py create mode 100644 memoryos-pypi/test.py create mode 100644 memoryos-pypi/updater.py create mode 100644 memoryos-pypi/utils.py diff --git a/memoryos-pypi/__init__.py b/memoryos-pypi/__init__.py new file mode 100644 index 0000000..b97e620 --- /dev/null +++ b/memoryos-pypi/__init__.py @@ -0,0 +1,3 @@ +from .memoryos import Memoryos + +__all__ = ['Memoryos'] \ No newline at end of file diff --git a/memoryos-pypi/long_term.py b/memoryos-pypi/long_term.py new file mode 100644 index 0000000..4e49b36 --- /dev/null +++ b/memoryos-pypi/long_term.py @@ -0,0 +1,170 @@ +import json +import numpy as np +import faiss +from collections import deque +try: + from .utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists +except ImportError: + from utils import get_timestamp, get_embedding, normalize_vector, ensure_directory_exists + +class LongTermMemory: + def __init__(self, file_path, knowledge_capacity=100, embedding_model_name: str = "all-MiniLM-L6-v2", embedding_model_kwargs: dict = None): + self.file_path = file_path + ensure_directory_exists(self.file_path) + self.knowledge_capacity = knowledge_capacity + self.user_profiles = {} # {user_id: {data: "profile_string", "last_updated": "timestamp"}} + # Use deques for knowledge bases to easily manage capacity + self.knowledge_base = deque(maxlen=self.knowledge_capacity) # For general/user private knowledge + self.assistant_knowledge = deque(maxlen=self.knowledge_capacity) # For assistant specific knowledge + + self.embedding_model_name = embedding_model_name + self.embedding_model_kwargs = 
embedding_model_kwargs if embedding_model_kwargs is not None else {} + self.load() + + def update_user_profile(self, user_id, new_data, merge=True): + if merge and user_id in self.user_profiles and self.user_profiles[user_id].get("data"): # Check if data exists + current_data = self.user_profiles[user_id]["data"] + if isinstance(current_data, str) and isinstance(new_data, str): + updated_data = f"{current_data}\n\n--- Updated on {get_timestamp()} ---\n{new_data}" + else: # Fallback to overwrite if types are not strings or for more complex merge + updated_data = new_data + else: + # If merge=False or no existing data, replace with new data + updated_data = new_data + + self.user_profiles[user_id] = { + "data": updated_data, + "last_updated": get_timestamp() + } + print(f"LongTermMemory: Updated user profile for {user_id} (merge={merge}).") + self.save() + + def get_raw_user_profile(self, user_id): + return self.user_profiles.get(user_id, {}).get("data", "None") # Return "None" string if not found + + def get_user_profile_data(self, user_id): + return self.user_profiles.get(user_id, {}) + + def add_knowledge_entry(self, knowledge_text, knowledge_deque: deque, type_name="knowledge"): + if not knowledge_text or knowledge_text.strip().lower() in ["", "none", "- none", "- none."]: + print(f"LongTermMemory: Empty {type_name} received, not saving.") + return + + # If deque is full, the oldest item is automatically removed when appending. + vec = get_embedding( + knowledge_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + vec = normalize_vector(vec).tolist() + entry = { + "knowledge": knowledge_text, + "timestamp": get_timestamp(), + "knowledge_embedding": vec + } + knowledge_deque.append(entry) + print(f"LongTermMemory: Added {type_name}. 
Current count: {len(knowledge_deque)}.") + self.save() + + def add_user_knowledge(self, knowledge_text): + self.add_knowledge_entry(knowledge_text, self.knowledge_base, "user knowledge") + + def add_assistant_knowledge(self, knowledge_text): + self.add_knowledge_entry(knowledge_text, self.assistant_knowledge, "assistant knowledge") + + def get_user_knowledge(self): + return list(self.knowledge_base) + + def get_assistant_knowledge(self): + return list(self.assistant_knowledge) + + def _search_knowledge_deque(self, query, knowledge_deque: deque, threshold=0.1, top_k=5): + if not knowledge_deque: + return [] + + query_vec = get_embedding( + query, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + query_vec = normalize_vector(query_vec) + + embeddings = [] + valid_entries = [] + for entry in knowledge_deque: + if "knowledge_embedding" in entry and entry["knowledge_embedding"]: + embeddings.append(np.array(entry["knowledge_embedding"], dtype=np.float32)) + valid_entries.append(entry) + else: + print(f"Warning: Entry without embedding found in knowledge_deque: {entry.get('knowledge','N/A')[:50]}") + + if not embeddings: + return [] + + embeddings_np = np.array(embeddings, dtype=np.float32) + if embeddings_np.ndim == 1: # Single item case + if embeddings_np.shape[0] == 0: return [] # Empty embeddings + embeddings_np = embeddings_np.reshape(1, -1) + + if embeddings_np.shape[0] == 0: # No valid embeddings + return [] + + dim = embeddings_np.shape[1] + index = faiss.IndexFlatIP(dim) # Using Inner Product for similarity + index.add(embeddings_np) + + query_arr = np.array([query_vec], dtype=np.float32) + distances, indices = index.search(query_arr, min(top_k, len(valid_entries))) # Search at most k or length of valid_entries + + results = [] + for i, idx in enumerate(indices[0]): + if idx != -1: # faiss returns -1 for no valid index + similarity_score = float(distances[0][i]) # For IndexFlatIP, distance is the dot product (similarity) + if similarity_score >= threshold: + results.append(valid_entries[idx]) # Add the original entry dict + + # Sort by similarity score descending before returning, as faiss might not guarantee order for IP + results.sort(key=lambda x: float(np.dot(np.array(x["knowledge_embedding"], dtype=np.float32), query_vec)), reverse=True) + return results + + def search_user_knowledge(self, query, threshold=0.1, top_k=5): + results = self._search_knowledge_deque(query, self.knowledge_base, threshold, top_k) + print(f"LongTermMemory: Searched user knowledge for '{query[:30]}...'. Found {len(results)} matches.") + return results + + def search_assistant_knowledge(self, query, threshold=0.1, top_k=5): + results = self._search_knowledge_deque(query, self.assistant_knowledge, threshold, top_k) + print(f"LongTermMemory: Searched assistant knowledge for '{query[:30]}...'. 
Found {len(results)} matches.") + return results + + def save(self): + data = { + "user_profiles": self.user_profiles, + "knowledge_base": list(self.knowledge_base), # Convert deques to lists for JSON serialization + "assistant_knowledge": list(self.assistant_knowledge) + } + try: + with open(self.file_path, "w", encoding="utf-8") as f: + json.dump(data, f, ensure_ascii=False, indent=2) + except IOError as e: + print(f"Error saving LongTermMemory to {self.file_path}: {e}") + + def load(self): + try: + with open(self.file_path, "r", encoding="utf-8") as f: + data = json.load(f) + self.user_profiles = data.get("user_profiles", {}) + # Load into deques, respecting maxlen + kb_data = data.get("knowledge_base", []) + self.knowledge_base = deque(kb_data, maxlen=self.knowledge_capacity) + + ak_data = data.get("assistant_knowledge", []) + self.assistant_knowledge = deque(ak_data, maxlen=self.knowledge_capacity) + + print(f"LongTermMemory: Loaded from {self.file_path}.") + except FileNotFoundError: + print(f"LongTermMemory: No history file found at {self.file_path}. Initializing new memory.") + except json.JSONDecodeError: + print(f"LongTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") + except Exception as e: + print(f"LongTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/memoryos.py b/memoryos-pypi/memoryos.py new file mode 100644 index 0000000..ae7da09 --- /dev/null +++ b/memoryos-pypi/memoryos.py @@ -0,0 +1,362 @@ +import os +import json +from concurrent.futures import ThreadPoolExecutor, as_completed + +# Changed to absolute imports +try: + # Try relative imports first (when used as a package) + from .utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists + from .
import prompts + from .short_term import ShortTermMemory + from .mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic + from .long_term import LongTermMemory + from .updater import Updater + from .retriever import Retriever +except ImportError: + # Fall back to absolute imports (when used as a standalone module) + from utils import OpenAIClient, get_timestamp, generate_id, gpt_user_profile_analysis, gpt_knowledge_extraction, ensure_directory_exists + import prompts + from short_term import ShortTermMemory + from mid_term import MidTermMemory, compute_segment_heat # For H_THRESHOLD logic + from long_term import LongTermMemory + from updater import Updater + from retriever import Retriever + +# Heat threshold for triggering profile/knowledge update from mid-term memory +H_PROFILE_UPDATE_THRESHOLD = 5.0 +DEFAULT_ASSISTANT_ID = "default_assistant_profile" + +class Memoryos: + def __init__(self, user_id: str, + openai_api_key: str, + data_storage_path: str, + openai_base_url: str = None, + assistant_id: str = DEFAULT_ASSISTANT_ID, + short_term_capacity=10, + mid_term_capacity=2000, + long_term_knowledge_capacity=100, + retrieval_queue_capacity=7, + mid_term_heat_threshold=H_PROFILE_UPDATE_THRESHOLD, + mid_term_similarity_threshold=0.6, + llm_model="gpt-4o-mini", + embedding_model_name: str = "all-MiniLM-L6-v2", + embedding_model_kwargs: dict = None + ): + self.user_id = user_id + self.assistant_id = assistant_id + self.data_storage_path = os.path.abspath(data_storage_path) + self.llm_model = llm_model + self.mid_term_similarity_threshold = mid_term_similarity_threshold + self.embedding_model_name = embedding_model_name + + # Smart defaults for embedding_model_kwargs + if embedding_model_kwargs is None: + if 'bge-m3' in self.embedding_model_name.lower(): + print("INFO: Detected bge-m3 model, defaulting embedding_model_kwargs to {'use_fp16': True}") + self.embedding_model_kwargs = {'use_fp16': True} + else: + self.embedding_model_kwargs = {} + else: + self.embedding_model_kwargs = embedding_model_kwargs + + + print(f"Initializing Memoryos for user '{self.user_id}' and assistant '{self.assistant_id}'.
Data path: {self.data_storage_path}") + print(f"Using unified LLM model: {self.llm_model}") + print(f"Using embedding model: {self.embedding_model_name} with kwargs: {self.embedding_model_kwargs}") + + # Initialize OpenAI Client + self.client = OpenAIClient(api_key=openai_api_key, base_url=openai_base_url) + + # Define file paths for user-specific data + self.user_data_dir = os.path.join(self.data_storage_path, "users", self.user_id) + user_short_term_path = os.path.join(self.user_data_dir, "short_term.json") + user_mid_term_path = os.path.join(self.user_data_dir, "mid_term.json") + user_long_term_path = os.path.join(self.user_data_dir, "long_term_user.json") # User profile and their knowledge + + # Define file paths for assistant-specific data (knowledge) + self.assistant_data_dir = os.path.join(self.data_storage_path, "assistants", self.assistant_id) + assistant_long_term_path = os.path.join(self.assistant_data_dir, "long_term_assistant.json") + + # Ensure directories exist + ensure_directory_exists(user_short_term_path) # ensure_directory_exists operates on the file path, creating parent dirs + ensure_directory_exists(user_mid_term_path) + ensure_directory_exists(user_long_term_path) + ensure_directory_exists(assistant_long_term_path) + + # Initialize Memory Modules for User + self.short_term_memory = ShortTermMemory(file_path=user_short_term_path, max_capacity=short_term_capacity) + self.mid_term_memory = MidTermMemory( + file_path=user_mid_term_path, + client=self.client, + max_capacity=mid_term_capacity, + embedding_model_name=self.embedding_model_name, + embedding_model_kwargs=self.embedding_model_kwargs + ) + self.user_long_term_memory = LongTermMemory( + file_path=user_long_term_path, + knowledge_capacity=long_term_knowledge_capacity, + embedding_model_name=self.embedding_model_name, + embedding_model_kwargs=self.embedding_model_kwargs + ) + + # Initialize Memory Module for Assistant Knowledge + self.assistant_long_term_memory = LongTermMemory( + file_path=assistant_long_term_path, + knowledge_capacity=long_term_knowledge_capacity, + embedding_model_name=self.embedding_model_name, + embedding_model_kwargs=self.embedding_model_kwargs + ) + + # Initialize Orchestration Modules + self.updater = Updater(short_term_memory=self.short_term_memory, + mid_term_memory=self.mid_term_memory, + long_term_memory=self.user_long_term_memory, # Updater primarily updates user's LTM profile/knowledge + client=self.client, + topic_similarity_threshold=mid_term_similarity_threshold, # Pass the mid-term memory similarity threshold + llm_model=self.llm_model) + self.retriever = Retriever( + mid_term_memory=self.mid_term_memory, + long_term_memory=self.user_long_term_memory, + assistant_long_term_memory=self.assistant_long_term_memory, # Pass assistant LTM + queue_capacity=retrieval_queue_capacity + ) + + self.mid_term_heat_threshold = mid_term_heat_threshold + + def _trigger_profile_and_knowledge_update_if_needed(self): + """ + Checks mid-term memory for hot segments and triggers profile/knowledge update if threshold is met. + Adapted from main_memoybank.py's update_user_profile_from_top_segment. + Enhanced with parallel LLM processing for better performance.
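+
+        The trigger is the segment heat maintained by MidTermMemory (see compute_segment_heat in mid_term.py):
+
+            H_segment = HEAT_ALPHA * N_visit + HEAT_BETA * L_interaction + HEAT_GAMMA * R_recency
+
+        With the default coefficients of 1.0 each, a session whose visit count, interaction length
+        and recency factor together reach mid_term_heat_threshold (default 5.0) is analyzed here;
+        N_visit and L_interaction are then reset so the same pages are not analyzed twice.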
+ """ + if not self.mid_term_memory.heap: + return + + # Peek at the top of the heap (hottest segment) + # MidTermMemory heap stores (-H_segment, sid) + neg_heat, sid = self.mid_term_memory.heap[0] + current_heat = -neg_heat + + if current_heat >= self.mid_term_heat_threshold: + session = self.mid_term_memory.sessions.get(sid) + if not session: + self.mid_term_memory.rebuild_heap() # Clean up if session is gone + return + + # Get unanalyzed pages from this hot session + # A page is a dict: {"user_input": ..., "agent_response": ..., "timestamp": ..., "analyzed": False, ...} + unanalyzed_pages = [p for p in session.get("details", []) if not p.get("analyzed", False)] + + if unanalyzed_pages: + print(f"Memoryos: Mid-term session {sid} heat ({current_heat:.2f}) exceeded threshold. Analyzing {len(unanalyzed_pages)} pages for profile/knowledge update.") + + # 并行执行两个LLM任务:用户画像分析(已包含更新)、知识提取 + def task_user_profile_analysis(): + print("Memoryos: Starting parallel user profile analysis and update...") + # 获取现有用户画像 + existing_profile = self.user_long_term_memory.get_raw_user_profile(self.user_id) + if not existing_profile or existing_profile.lower() == "none": + existing_profile = "No existing profile data." + + # 直接输出更新后的完整画像 + return gpt_user_profile_analysis(unanalyzed_pages, self.client, model=self.llm_model, existing_user_profile=existing_profile) + + def task_knowledge_extraction(): + print("Memoryos: Starting parallel knowledge extraction...") + return gpt_knowledge_extraction(unanalyzed_pages, self.client, model=self.llm_model) + + # 使用并行任务执行 + with ThreadPoolExecutor(max_workers=2) as executor: + # 提交两个主要任务 + future_profile = executor.submit(task_user_profile_analysis) + future_knowledge = executor.submit(task_knowledge_extraction) + + # 等待结果 + try: + updated_user_profile = future_profile.result() # 直接是更新后的完整画像 + knowledge_result = future_knowledge.result() + except Exception as e: + print(f"Error in parallel LLM processing: {e}") + return + + new_user_private_knowledge = knowledge_result.get("private") + new_assistant_knowledge = knowledge_result.get("assistant_knowledge") + + # 直接使用更新后的完整用户画像 + if updated_user_profile and updated_user_profile.lower() != "none": + print("Memoryos: Updating user profile with integrated analysis...") + self.user_long_term_memory.update_user_profile(self.user_id, updated_user_profile, merge=False) # 直接替换为新的完整画像 + + # Add User Private Knowledge to user's LTM + if new_user_private_knowledge and new_user_private_knowledge.lower() != "none": + for line in new_user_private_knowledge.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.user_long_term_memory.add_user_knowledge(line.strip()) + + # Add Assistant Knowledge to assistant's LTM + if new_assistant_knowledge and new_assistant_knowledge.lower() != "none": + for line in new_assistant_knowledge.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.assistant_long_term_memory.add_assistant_knowledge(line.strip()) # Save to dedicated assistant LTM + + # Mark pages as analyzed and reset session heat contributors + for p in session["details"]: + p["analyzed"] = True # Mark all pages in session, or just unanalyzed_pages? 
+ # Original code marked all pages in session + + session["N_visit"] = 0 # Reset visits after analysis + session["L_interaction"] = 0 # Reset interaction length contribution + # session["R_recency"] = 1.0 # Recency will re-calculate naturally + session["H_segment"] = compute_segment_heat(session) # Recompute heat with reset factors + session["last_visit_time"] = get_timestamp() # Update last visit time + + self.mid_term_memory.rebuild_heap() # Heap needs rebuild due to H_segment change + self.mid_term_memory.save() + print(f"Memoryos: Profile/Knowledge update for session {sid} complete. Heat reset.") + else: + print(f"Memoryos: Hot session {sid} has no unanalyzed pages. Skipping profile update.") + else: + # print(f"Memoryos: Top session {sid} heat ({current_heat:.2f}) below threshold. No profile update.") + pass # No action if below threshold + + def add_memory(self, user_input: str, agent_response: str, timestamp: str = None, meta_data: dict = None): + """ + Adds a new QA pair (memory) to the system. + meta_data is not used in the current refactoring but kept for future use. + """ + if not timestamp: + timestamp = get_timestamp() + + qa_pair = { + "user_input": user_input, + "agent_response": agent_response, + "timestamp": timestamp + # meta_data can be added here if it needs to be stored with the QA pair + } + self.short_term_memory.add_qa_pair(qa_pair) + print(f"Memoryos: Added QA to short-term. User: {user_input[:30]}...") + + if self.short_term_memory.is_full(): + print("Memoryos: Short-term memory full. Processing to mid-term.") + self.updater.process_short_term_to_mid_term() + + # After any memory addition that might impact mid-term, check for profile updates + self._trigger_profile_and_knowledge_update_if_needed() + + def get_response(self, query: str, relationship_with_user="friend", style_hint="", user_conversation_meta_data: dict = None) -> str: + """ + Generates a response to the user's query, incorporating memory and context. + """ + print(f"Memoryos: Generating response for query: '{query[:50]}...'") + + # 1. Retrieve context + retrieval_results = self.retriever.retrieve_context( + user_query=query, + user_id=self.user_id + # Using default thresholds from Retriever class for now + ) + retrieved_pages = retrieval_results["retrieved_pages"] + retrieved_user_knowledge = retrieval_results["retrieved_user_knowledge"] + retrieved_assistant_knowledge = retrieval_results["retrieved_assistant_knowledge"] + + # 2. Get short-term history + short_term_history = self.short_term_memory.get_all() + history_text = "\n".join([ + f"User: {qa.get('user_input', '')}\nAssistant: {qa.get('agent_response', '')} (Time: {qa.get('timestamp', '')})" + for qa in short_term_history + ]) + + # 3. Format retrieved mid-term pages (retrieval_queue equivalent) + retrieval_text = "\n".join([ + f"【Historical Memory】\nUser: {page.get('user_input', '')}\nAssistant: {page.get('agent_response', '')}\nTime: {page.get('timestamp', '')}\nConversation chain overview: {page.get('meta_info','N/A')}" + for page in retrieved_pages + ]) + + # 4. Get user profile + user_profile_text = self.user_long_term_memory.get_raw_user_profile(self.user_id) + if not user_profile_text or user_profile_text.lower() == "none": + user_profile_text = "No detailed profile available yet." + + # 5. 
Format retrieved user knowledge for background + user_knowledge_background = "" + if retrieved_user_knowledge: + user_knowledge_background = "\n【Relevant User Knowledge Entries】\n" + for kn_entry in retrieved_user_knowledge: + user_knowledge_background += f"- {kn_entry['knowledge']} (Recorded: {kn_entry['timestamp']})\n" + + background_context = f"【User Profile】\n{user_profile_text}\n{user_knowledge_background}" + + # 6. Format retrieved Assistant Knowledge (from assistant's LTM) + # Use retrieved assistant knowledge instead of all assistant knowledge + assistant_knowledge_text_for_prompt = "【Assistant Knowledge Base】\n" + if retrieved_assistant_knowledge: + for ak_entry in retrieved_assistant_knowledge: + assistant_knowledge_text_for_prompt += f"- {ak_entry['knowledge']} (Recorded: {ak_entry['timestamp']})\n" + else: + assistant_knowledge_text_for_prompt += "- No relevant assistant knowledge found for this query.\n" + + # 7. Format user_conversation_meta_data (if provided) + meta_data_text_for_prompt = "【Current Conversation Metadata】\n" + if user_conversation_meta_data: + try: + meta_data_text_for_prompt += json.dumps(user_conversation_meta_data, ensure_ascii=False, indent=2) + except TypeError: + meta_data_text_for_prompt += str(user_conversation_meta_data) + else: + meta_data_text_for_prompt += "None provided for this turn." + + # 8. Construct Prompts + system_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT.format( + relationship=relationship_with_user, + assistant_knowledge_text=assistant_knowledge_text_for_prompt, + meta_data_text=meta_data_text_for_prompt # Using meta_data_text placeholder for user_conversation_meta_data + ) + + user_prompt_text = prompts.GENERATE_SYSTEM_RESPONSE_USER_PROMPT.format( + history_text=history_text, + retrieval_text=retrieval_text, + background=background_context, + relationship=relationship_with_user, + query=query + ) + + messages = [ + {"role": "system", "content": system_prompt_text}, + {"role": "user", "content": user_prompt_text} + ] + + # 9. Call LLM for response + print("Memoryos: Calling LLM for final response generation...") + # print("System Prompt:\n", system_prompt_text) + # print("User Prompt:\n", user_prompt_text) + response_content = self.client.chat_completion( + model=self.llm_model, + messages=messages, + temperature=0.7, + max_tokens=1500 # As in original main + ) + + # 10. Add this interaction to memory + self.add_memory(user_input=query, agent_response=response_content, timestamp=get_timestamp()) + + return response_content + + # --- Helper/Maintenance methods (optional additions) --- + def get_user_profile_summary(self) -> str: + return self.user_long_term_memory.get_raw_user_profile(self.user_id) + + def get_assistant_knowledge_summary(self) -> list: + return self.assistant_long_term_memory.get_assistant_knowledge() + + def force_mid_term_analysis(self): + """Forces analysis of all unanalyzed pages in the hottest mid-term segment if heat is above 0. + Useful for testing or manual triggering. 
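+
+        A minimal usage sketch (only the required constructor arguments are shown;
+        key is assumed to stand for a valid OpenAI API key):
+
+            memory = Memoryos(user_id="u1", openai_api_key=key, data_storage_path="./data")
+            memory.add_memory("Hi, I'm Tom", "Hello Tom!")
+            memory.force_mid_term_analysis()  # analyze the hottest segment immediately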
+ """ + original_threshold = self.mid_term_heat_threshold + self.mid_term_heat_threshold = 0.0 # Temporarily lower threshold + print("Memoryos: Force-triggering mid-term analysis...") + self._trigger_profile_and_knowledge_update_if_needed() + self.mid_term_heat_threshold = original_threshold # Restore original threshold + + def __repr__(self): + return f"" \ No newline at end of file diff --git a/memoryos-pypi/mid_term.py b/memoryos-pypi/mid_term.py new file mode 100644 index 0000000..7c42fa9 --- /dev/null +++ b/memoryos-pypi/mid_term.py @@ -0,0 +1,391 @@ +import json +import numpy as np +from collections import defaultdict +import faiss +import heapq +from datetime import datetime + +try: + from .utils import ( + get_timestamp, generate_id, get_embedding, normalize_vector, + compute_time_decay, ensure_directory_exists, OpenAIClient + ) +except ImportError: + from utils import ( + get_timestamp, generate_id, get_embedding, normalize_vector, + compute_time_decay, ensure_directory_exists, OpenAIClient + ) + +# Heat computation constants (can be tuned or made configurable) +HEAT_ALPHA = 1.0 +HEAT_BETA = 1.0 +HEAT_GAMMA = 1 +RECENCY_TAU_HOURS = 24 # For R_recency calculation in compute_segment_heat + +def compute_segment_heat(session, alpha=HEAT_ALPHA, beta=HEAT_BETA, gamma=HEAT_GAMMA, tau_hours=RECENCY_TAU_HOURS): + N_visit = session.get("N_visit", 0) + L_interaction = session.get("L_interaction", 0) + + # Calculate recency based on last_visit_time + R_recency = 1.0 # Default if no last_visit_time + if session.get("last_visit_time"): + R_recency = compute_time_decay(session["last_visit_time"], get_timestamp(), tau_hours) + + session["R_recency"] = R_recency # Update session's recency factor + return alpha * N_visit + beta * L_interaction + gamma * R_recency + +class MidTermMemory: + def __init__(self, file_path: str, client: OpenAIClient, max_capacity=2000, embedding_model_name: str = "all-MiniLM-L6-v2", embedding_model_kwargs: dict = None): + self.file_path = file_path + ensure_directory_exists(self.file_path) + self.client = client + self.max_capacity = max_capacity + self.sessions = {} # {session_id: session_object} + self.access_frequency = defaultdict(int) # {session_id: access_count_for_lfu} + self.heap = [] # Min-heap storing (-H_segment, session_id) for hottest segments + + self.embedding_model_name = embedding_model_name + self.embedding_model_kwargs = embedding_model_kwargs if embedding_model_kwargs is not None else {} + self.load() + + def get_page_by_id(self, page_id): + for session in self.sessions.values(): + for page in session.get("details", []): + if page.get("page_id") == page_id: + return page + return None + + def update_page_connections(self, prev_page_id, next_page_id): + if prev_page_id: + prev_page = self.get_page_by_id(prev_page_id) + if prev_page: + prev_page["next_page"] = next_page_id + if next_page_id: + next_page = self.get_page_by_id(next_page_id) + if next_page: + next_page["pre_page"] = prev_page_id + # self.save() # Avoid saving on every minor update; save at higher level operations + + def evict_lfu(self): + if not self.access_frequency or not self.sessions: + return + + lfu_sid = min(self.access_frequency, key=self.access_frequency.get) + print(f"MidTermMemory: LFU eviction. 
Session {lfu_sid} has lowest access frequency.") + + if lfu_sid not in self.sessions: + del self.access_frequency[lfu_sid] # Clean up access frequency if session already gone + self.rebuild_heap() + return + + session_to_delete = self.sessions.pop(lfu_sid) # Remove from sessions + del self.access_frequency[lfu_sid] # Remove from LFU tracking + + # Clean up page connections if this session's pages were linked + for page in session_to_delete.get("details", []): + prev_page_id = page.get("pre_page") + next_page_id = page.get("next_page") + # If a page from this session was linked to an external page, nullify the external link + if prev_page_id and not self.get_page_by_id(prev_page_id): # Check if prev page is still in memory + # This case should ideally not happen if connections are within sessions or handled carefully + pass + if next_page_id and not self.get_page_by_id(next_page_id): + pass + # More robustly, one might need to search all other sessions if inter-session linking was allowed + # For now, assuming internal consistency or that MemoryOS class manages higher-level links + + self.rebuild_heap() + self.save() + print(f"MidTermMemory: Evicted session {lfu_sid}.") + + def add_session(self, summary, details, summary_keywords=None): + session_id = generate_id("session") + summary_vec = get_embedding( + summary, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + summary_vec = normalize_vector(summary_vec).tolist() + summary_keywords = summary_keywords if summary_keywords is not None else [] + + processed_details = [] + for page_data in details: + page_id = page_data.get("page_id", generate_id("page")) + + # Check for an existing embedding to avoid recomputation + if "page_embedding" in page_data and page_data["page_embedding"]: + print(f"MidTermMemory: Reusing existing embedding for page {page_id}") + inp_vec = page_data["page_embedding"] + # Make sure the embedding is normalized + if isinstance(inp_vec, list): + inp_vec_np = np.array(inp_vec, dtype=np.float32) + if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Check whether re-normalization is needed + inp_vec = normalize_vector(inp_vec_np).tolist() + else: + print(f"MidTermMemory: Computing new embedding for page {page_id}") + full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" + inp_vec = get_embedding( + full_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + inp_vec = normalize_vector(inp_vec).tolist() + + # Use existing keywords, or leave empty (to be filled by multi-summary) + if "page_keywords" in page_data and page_data["page_keywords"]: + print(f"MidTermMemory: Using existing keywords for page {page_id}") + page_keywords = page_data["page_keywords"] + else: + print(f"MidTermMemory: Setting empty keywords for page {page_id} (will be filled by multi-summary)") + page_keywords = [] + + processed_page = { + **page_data, # Carry over existing fields like user_input, agent_response, timestamp + "page_id": page_id, + "page_embedding": inp_vec, + "page_keywords": page_keywords, + "preloaded": page_data.get("preloaded", False), # Preserve if passed + "analyzed": page_data.get("analyzed", False), # Preserve if passed + # pre_page, next_page, meta_info are handled by DynamicUpdater + } + processed_details.append(processed_page) + + current_ts = get_timestamp() + session_obj = { + "id": session_id, + "summary": summary, + "summary_keywords": summary_keywords, + "summary_embedding": summary_vec, + "details": processed_details, + "L_interaction": len(processed_details), + "R_recency": 1.0, # Initial recency +
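+            # N_visit, L_interaction and R_recency are the inputs to compute_segment_heat(); H_segment below is recomputed from them whenever the session is visited or merged.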
"N_visit": 0, + "H_segment": 0.0, # Initial heat, will be computed + "timestamp": current_ts, # Creation timestamp + "last_visit_time": current_ts, # Also initial last_visit_time for recency calc + "access_count_lfu": 0 # For LFU eviction policy + } + session_obj["H_segment"] = compute_segment_heat(session_obj) + self.sessions[session_id] = session_obj + self.access_frequency[session_id] = 0 # Initialize for LFU + heapq.heappush(self.heap, (-session_obj["H_segment"], session_id)) # Use negative heat for max-heap behavior + + print(f"MidTermMemory: Added new session {session_id}. Initial heat: {session_obj['H_segment']:.2f}.") + if len(self.sessions) > self.max_capacity: + self.evict_lfu() + self.save() + return session_id + + def rebuild_heap(self): + self.heap = [] + for sid, session_data in self.sessions.items(): + # Ensure H_segment is up-to-date before rebuilding heap if necessary + # session_data["H_segment"] = compute_segment_heat(session_data) + heapq.heappush(self.heap, (-session_data["H_segment"], sid)) + # heapq.heapify(self.heap) # Not needed if pushing one by one + # No save here, it's an internal operation often followed by other ops that save + + def insert_pages_into_session(self, summary_for_new_pages, keywords_for_new_pages, pages_to_insert, + similarity_threshold=0.6, keyword_similarity_alpha=1.0): + if not self.sessions: # If no existing sessions, just add as a new one + print("MidTermMemory: No existing sessions. Adding new session directly.") + return self.add_session(summary_for_new_pages, pages_to_insert, keywords_for_new_pages) + + new_summary_vec = get_embedding( + summary_for_new_pages, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + new_summary_vec = normalize_vector(new_summary_vec) + + best_sid = None + best_overall_score = -1 + + for sid, existing_session in self.sessions.items(): + existing_summary_vec = np.array(existing_session["summary_embedding"], dtype=np.float32) + semantic_sim = float(np.dot(existing_summary_vec, new_summary_vec)) + + # Keyword similarity (Jaccard index based) + existing_keywords = set(existing_session.get("summary_keywords", [])) + new_keywords_set = set(keywords_for_new_pages) + s_topic_keywords = 0 + if existing_keywords and new_keywords_set: + intersection = len(existing_keywords.intersection(new_keywords_set)) + union = len(existing_keywords.union(new_keywords_set)) + if union > 0: + s_topic_keywords = intersection / union + + overall_score = semantic_sim + keyword_similarity_alpha * s_topic_keywords + + if overall_score > best_overall_score: + best_overall_score = overall_score + best_sid = sid + + if best_sid and best_overall_score >= similarity_threshold: + print(f"MidTermMemory: Merging pages into session {best_sid}. 
Score: {best_overall_score:.2f} (Threshold: {similarity_threshold})") + target_session = self.sessions[best_sid] + + processed_new_pages = [] + for page_data in pages_to_insert: + page_id = page_data.get("page_id", generate_id("page")) # Use existing or generate new ID + + # Check for an existing embedding to avoid recomputation + if "page_embedding" in page_data and page_data["page_embedding"]: + print(f"MidTermMemory: Reusing existing embedding for page {page_id}") + inp_vec = page_data["page_embedding"] + # Make sure the embedding is normalized + if isinstance(inp_vec, list): + inp_vec_np = np.array(inp_vec, dtype=np.float32) + if np.linalg.norm(inp_vec_np) > 1.1 or np.linalg.norm(inp_vec_np) < 0.9: # Check whether re-normalization is needed + inp_vec = normalize_vector(inp_vec_np).tolist() + else: + print(f"MidTermMemory: Computing new embedding for page {page_id}") + full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}" + inp_vec = get_embedding( + full_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + inp_vec = normalize_vector(inp_vec).tolist() + + # Use existing keywords, or inherit the session's keywords + if "page_keywords" in page_data and page_data["page_keywords"]: + print(f"MidTermMemory: Using existing keywords for page {page_id}") + page_keywords_current = page_data["page_keywords"] + else: + print(f"MidTermMemory: Using session keywords for page {page_id}") + page_keywords_current = keywords_for_new_pages + + processed_page = { + **page_data, # Carry over existing fields + "page_id": page_id, + "page_embedding": inp_vec, + "page_keywords": page_keywords_current, + # analyzed, preloaded flags should be part of page_data if set + } + target_session["details"].append(processed_page) + processed_new_pages.append(processed_page) + + target_session["L_interaction"] += len(pages_to_insert) + target_session["last_visit_time"] = get_timestamp() # Update last visit time on modification + target_session["H_segment"] = compute_segment_heat(target_session) + self.rebuild_heap() # Rebuild heap as heat has changed + self.save() + return best_sid + else: + print(f"MidTermMemory: No suitable session to merge (best score {best_overall_score:.2f} < threshold {similarity_threshold}).
Creating new session.") + return self.add_session(summary_for_new_pages, pages_to_insert, keywords_for_new_pages) + + def search_sessions(self, query_text, segment_similarity_threshold=0.1, page_similarity_threshold=0.1, + top_k_sessions=5, keyword_alpha=1.0, recency_tau_search=3600): + if not self.sessions: + return [] + + query_vec = get_embedding( + query_text, + model_name=self.embedding_model_name, + **self.embedding_model_kwargs + ) + query_vec = normalize_vector(query_vec) + query_keywords = set() # Keywords extraction removed, relying on semantic similarity + + candidate_sessions = [] + session_ids = list(self.sessions.keys()) + if not session_ids: return [] + + summary_embeddings_list = [self.sessions[s]["summary_embedding"] for s in session_ids] + summary_embeddings_np = np.array(summary_embeddings_list, dtype=np.float32) + + dim = summary_embeddings_np.shape[1] + index = faiss.IndexFlatIP(dim) # Inner product for similarity + index.add(summary_embeddings_np) + + query_arr_np = np.array([query_vec], dtype=np.float32) + distances, indices = index.search(query_arr_np, min(top_k_sessions, len(session_ids))) + + results = [] + current_time_str = get_timestamp() + + for i, idx in enumerate(indices[0]): + if idx == -1: continue + + session_id = session_ids[idx] + session = self.sessions[session_id] + semantic_sim_score = float(distances[0][i]) # This is the dot product + + # Keyword similarity for session summary + session_keywords = set(session.get("summary_keywords", [])) + s_topic_keywords = 0 + if query_keywords and session_keywords: + intersection = len(query_keywords.intersection(session_keywords)) + union = len(query_keywords.union(session_keywords)) + if union > 0: s_topic_keywords = intersection / union + + # Time decay for session recency in search scoring + # time_decay_factor = compute_time_decay(session["timestamp"], current_time_str, tau_hours=recency_tau_search) + + # Combined score for session relevance + session_relevance_score = (semantic_sim_score + keyword_alpha * s_topic_keywords) + + if session_relevance_score >= segment_similarity_threshold: + matched_pages_in_session = [] + for page in session.get("details", []): + page_embedding = np.array(page["page_embedding"], dtype=np.float32) + # page_keywords = set(page.get("page_keywords", [])) + + page_sim_score = float(np.dot(page_embedding, query_vec)) + # Can also add keyword sim for pages if needed, but keeping it simpler for now + + if page_sim_score >= page_similarity_threshold: + matched_pages_in_session.append({"page_data": page, "score": page_sim_score}) + + if matched_pages_in_session: + # Update session access stats + session["N_visit"] += 1 + session["last_visit_time"] = current_time_str + session["access_count_lfu"] = session.get("access_count_lfu", 0) + 1 + self.access_frequency[session_id] = session["access_count_lfu"] + session["H_segment"] = compute_segment_heat(session) + self.rebuild_heap() # Heat changed + + results.append({ + "session_id": session_id, + "session_summary": session["summary"], + "session_relevance_score": session_relevance_score, + "matched_pages": sorted(matched_pages_in_session, key=lambda x: x["score"], reverse=True) # Sort pages by score + }) + + self.save() # Save changes from access updates + # Sort final results by session_relevance_score + return sorted(results, key=lambda x: x["session_relevance_score"], reverse=True) + + def save(self): + # Make a copy for saving to avoid modifying heap during iteration if it happens + # Though current heap is list of tuples, so direct 
modification risk is low + # sessions_to_save = {sid: data for sid, data in self.sessions.items()} + data_to_save = { + "sessions": self.sessions, + "access_frequency": dict(self.access_frequency), # Convert defaultdict to dict for JSON + # Heap is derived, no need to save typically, but can if desired for faster load + # "heap_snapshot": self.heap + } + try: + with open(self.file_path, "w", encoding="utf-8") as f: + json.dump(data_to_save, f, ensure_ascii=False, indent=2) + except IOError as e: + print(f"Error saving MidTermMemory to {self.file_path}: {e}") + + def load(self): + try: + with open(self.file_path, "r", encoding="utf-8") as f: + data = json.load(f) + self.sessions = data.get("sessions", {}) + self.access_frequency = defaultdict(int, data.get("access_frequency", {})) + self.rebuild_heap() # Rebuild heap from loaded sessions + print(f"MidTermMemory: Loaded from {self.file_path}. Sessions: {len(self.sessions)}.") + except FileNotFoundError: + print(f"MidTermMemory: No history file found at {self.file_path}. Initializing new memory.") + except json.JSONDecodeError: + print(f"MidTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") + except Exception as e: + print(f"MidTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/prompts.py b/memoryos-pypi/prompts.py new file mode 100644 index 0000000..48fbbd6 --- /dev/null +++ b/memoryos-pypi/prompts.py @@ -0,0 +1,233 @@ +""" +This file stores all the prompts used by the Memoryos system. +""" + +# Prompt for generating system response (from main_memoybank.py, generate_system_response_with_meta) +GENERATE_SYSTEM_RESPONSE_SYSTEM_PROMPT = ( + "As a communication expert with outstanding communication habits, you embody the role of {relationship} throughout the following dialogues.\n" + "Here are some of your distinctive personal traits and knowledge:\n{assistant_knowledge_text}\n" + "User's profile:\n" + "{meta_data_text}\n" + "Your task is to generate responses that align with these traits and maintain the tone.\n" +) + +GENERATE_SYSTEM_RESPONSE_USER_PROMPT = ( + "\n" + "Drawing from your recent conversation with the user:\n" + "{history_text}\n\n" + "\n" + "The memories linked to the ongoing conversation are:\n" + "{retrieval_text}\n\n" + "\n" + "During the conversation process between you and the user in the past, you found that the user has the following characteristics:\n" + "{background}\n\n" + "Now, please role-play as {relationship} to continue the dialogue between you and the user.\n" + "The user just said: {query}\n" + "Please respond to the user's statement using the following format (maximum 30 words, must be in English):\n " + "When answering questions, be sure to check whether the timestamp of the referenced information matches the timeframe of the question" +) + +# Prompt for assistant knowledge extraction (from utils.py, analyze_assistant_knowledge) +ASSISTANT_KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are an assistant knowledge extraction engine. Rules: +1. Extract ONLY explicit statements about the assistant's identity or knowledge. +2. Use concise and factual statements in the first person. +3. If no relevant information is found, output "None".""" + +ASSISTANT_KNOWLEDGE_EXTRACTION_USER_PROMPT = """ +# Assistant Knowledge Extraction Task +Analyze the conversation and extract any facts or identity traits about the assistant. +If no traits can be extracted, reply with "None".
Use the following format for output: +The generated content should be as concise as possible — the more concise, the better. +【Assistant Knowledge】 +- [Fact 1] +- [Fact 2] +- (Or "None" if none found) + +Few-shot examples: +1. User: Can you recommend some movies? + AI: Yes, I recommend Interstellar. + Time: 2023-10-01 + 【Assistant Knowledge】 + - I recommend Interstellar on 2023-10-01. + +2. User: Can you help me with cooking recipes? + AI: Yes, I have extensive knowledge of cooking recipes and techniques. + Time: 2023-10-02 + 【Assistant Knowledge】 + - I have cooking recipes and techniques on 2023-10-02. + +3. User: That's interesting. I didn't know you could do that. + AI: I'm glad you find it interesting! + 【Assistant Knowledge】 + - None + +Conversation: +{conversation} +""" + +# Prompt for summarizing dialogs (from utils.py, gpt_summarize) +SUMMARIZE_DIALOGS_SYSTEM_PROMPT = "You are an expert in summarizing dialogue topics. Generate extremely concise and precise summaries. Be as brief as possible while capturing the essence." +SUMMARIZE_DIALOGS_USER_PROMPT = "Please generate a concise topic summary based on the following conversation. Keep it to 2-3 short sentences maximum:\n{dialog_text}\nConcise Summary:" + +# Prompt for multi-summary generation (from utils.py, gpt_generate_multi_summary) +MULTI_SUMMARY_SYSTEM_PROMPT = "You are an expert in analyzing dialogue topics. Generate concise summaries. No more than two topics. Be as brief as possible." +MULTI_SUMMARY_USER_PROMPT = ("Please analyze the following dialogue and generate extremely concise subtopic summaries (if applicable), with a maximum of two themes.\n" + "Each summary should be very brief - just a few words for the theme and content. Format as JSON array:\n" + "[\n {{\"theme\": \"Brief theme\", \"keywords\": [\"key1\", \"key2\"], \"content\": \"summary\"}}\n]\n" + "\nConversation content:\n{text}") + +# Prompt for personality analysis (NEW TEMPLATE) +PERSONALITY_ANALYSIS_SYSTEM_PROMPT = """You are a professional user preference analysis assistant. Your task is to analyze the user's personality preferences from the given dialogue based on the provided dimensions. + +For each dimension: +1. Carefully read the conversation and determine if the dimension is reflected. +2. If reflected, determine the user's preference level: High / Medium / Low, and briefly explain the reasoning, including time, people, and context if possible. +3. If the dimension is not reflected, do not extract or list it. + +Focus only on the user's preferences and traits for the personality analysis section. +Output only the user profile section. +""" + +PERSONALITY_ANALYSIS_USER_PROMPT = """Please analyze the latest user-AI conversation below and update the user profile based on the 90 personality preference dimensions. + +Here are the 90 dimensions and their explanations: + +[Psychological Model (Basic Needs & Personality)] +Extraversion: Preference for social activities. +Openness: Willingness to embrace new ideas and experiences. +Agreeableness: Tendency to be friendly and cooperative. +Conscientiousness: Responsibility and organizational ability. +Neuroticism: Emotional stability and sensitivity. +Physiological Needs: Concern for comfort and basic needs. +Need for Security: Emphasis on safety and stability. +Need for Belonging: Desire for group affiliation. +Need for Self-Esteem: Need for respect and recognition. +Cognitive Needs: Desire for knowledge and understanding. +Aesthetic Appreciation: Appreciation for beauty and art.
+Self-Actualization: Pursuit of one's full potential. +Need for Order: Preference for cleanliness and organization. +Need for Autonomy: Preference for independent decision-making and action. +Need for Power: Desire to influence or control others. +Need for Achievement: Value placed on accomplishments. + +[AI Alignment Dimensions] +Helpfulness: Whether the AI's response is practically useful to the user. (This reflects user's expectation of AI) +Honesty: Whether the AI's response is truthful. (This reflects user's expectation of AI) +Safety: Avoidance of sensitive or harmful content. (This reflects user's expectation of AI) +Instruction Compliance: Strict adherence to user instructions. (This reflects user's expectation of AI) +Truthfulness: Accuracy and authenticity of content. (This reflects user's expectation of AI) +Coherence: Clarity and logical consistency of expression. (This reflects user's expectation of AI) +Complexity: Preference for detailed and complex information. +Conciseness: Preference for brief and clear responses. + +[Content Platform Interest Tags] +Science Interest: Interest in science topics. +Education Interest: Concern with education and learning. +Psychology Interest: Interest in psychology topics. +Family Concern: Interest in family and parenting. +Fashion Interest: Interest in fashion topics. +Art Interest: Engagement with or interest in art. +Health Concern: Concern with physical health and lifestyle. +Financial Management Interest: Interest in finance and budgeting. +Sports Interest: Interest in sports and physical activity. +Food Interest: Passion for cooking and cuisine. +Travel Interest: Interest in traveling and exploring new places. +Music Interest: Interest in music appreciation or creation. +Literature Interest: Interest in literature and reading. +Film Interest: Interest in movies and cinema. +Social Media Activity: Frequency and engagement with social media. +Tech Interest: Interest in technology and innovation. +Environmental Concern: Attention to environmental and sustainability issues. +History Interest: Interest in historical knowledge and topics. +Political Concern: Interest in political and social issues. +Religious Interest: Interest in religion and spirituality. +Gaming Interest: Enjoyment of video games or board games. +Animal Concern: Concern for animals or pets. +Emotional Expression: Preference for direct vs. restrained emotional expression. +Sense of Humor: Preference for humorous or serious communication style. +Information Density: Preference for detailed vs. concise information. +Language Style: Preference for formal vs. casual tone. +Practicality: Preference for practical advice vs. theoretical discussion. + +**Task Instructions:** +1. Review the existing user profile below +2. Analyze the new conversation for evidence of the 90 dimensions above +3. Update and integrate the findings into a comprehensive user profile +4. For each dimension that can be identified, use the format: Dimension ( Level(High/Medium/Low) ) +5. Include brief reasoning for each dimension when possible +6. Maintain existing insights from the old profile while incorporating new observations +7. 
If a dimension cannot be inferred from either the old profile or new conversation, do not include it + +**Existing User Profile:** +{existing_user_profile} + +**Latest User-AI Conversation:** +{conversation} + +**Updated User Profile:** +Please provide the comprehensive updated user profile below, combining insights from both the existing profile and new conversation:""" + +# Prompt for knowledge extraction (NEW) +KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT = """You are a knowledge extraction assistant. Your task is to extract user private data and assistant knowledge from conversations. + +Focus on: +1. User private data: personal information, preferences, or private facts about the user +2. Assistant knowledge: explicit statements about what the assistant did, provided, or demonstrated + +Be extremely concise and factual in your extractions. Use the shortest possible phrases. +""" + +KNOWLEDGE_EXTRACTION_USER_PROMPT = """Please extract user private data and assistant knowledge from the latest user-AI conversation below. + +Latest User-AI Conversation: +{conversation} + +【User Private Data】 +Extract personal information about the user. Be extremely concise - use shortest possible phrases: +- [Brief fact]: [Minimal context(Including entities and time)] +- [Brief fact]: [Minimal context(Including entities and time)] +- (If no private data found, write "None") + +【Assistant Knowledge】 +Extract what the assistant demonstrated. Use format "Assistant [action] at [time]". Be extremely brief: +- Assistant [brief action] at [time/context] +- Assistant [brief capability] during [brief context] +- (If no assistant knowledge found, write "None") +""" + +# Prompt for updating user profile (from utils.py, gpt_update_profile) +UPDATE_PROFILE_SYSTEM_PROMPT = "You are an expert in merging and updating user profiles. Integrate the new information into the old profile, maintaining consistency and improving the overall understanding of the user. Avoid redundancy. The new analysis is based on specific dimensions, try to incorporate these insights meaningfully." +UPDATE_PROFILE_USER_PROMPT = "Please update the following user profile based on the new analysis. If the old profile is empty or \"None\", create a new one based on the new analysis.\n\nOld User Profile:\n{old_profile}\n\nNew Analysis Data:\n{new_analysis}\n\nUpdated User Profile:" + +# Prompt for extracting theme (from utils.py, gpt_extract_theme) +EXTRACT_THEME_SYSTEM_PROMPT = "You are an expert in extracting the main theme from a text. Provide a concise theme." +EXTRACT_THEME_USER_PROMPT = "Please extract the main theme from the following text:\n{answer_text}\n\nTheme:" + + + +# Prompt for conversation continuity check (from dynamic_update.py, _is_conversation_continuing) +CONTINUITY_CHECK_SYSTEM_PROMPT = "You are a conversation continuity detector. Return ONLY 'true' or 'false'." +CONTINUITY_CHECK_USER_PROMPT = ("Determine if these two conversation pages are continuous (true continuation without topic shift).\n" + "Return ONLY \"true\" or \"false\".\n\n" + "Previous Page:\nUser: {prev_user}\nAssistant: {prev_agent}\n\n" + "Current Page:\nUser: {curr_user}\nAssistant: {curr_agent}\n\n" + "Continuous?") + +# Prompt for generating meta info (from dynamic_update.py, _generate_meta_info) +META_INFO_SYSTEM_PROMPT = ("""You are a conversation meta-summary updater. Your task is to: +1. Preserve relevant context from previous meta-summary +2. Integrate new information from current dialogue +3. 
Output ONLY the updated summary (no explanations)""")
+META_INFO_USER_PROMPT = ("""Update the conversation meta-summary by incorporating the new dialogue while maintaining continuity.
+
+    Guidelines:
+    1. Start from the previous meta-summary (if exists)
+    2. Add/update information based on the new dialogue
+    3. Keep it concise (1-2 sentences max)
+    4. Maintain context coherence
+
+    Previous Meta-summary: {last_meta}
+    New Dialogue:
+    {new_dialogue}
+
+    Updated Meta-summary:""")
\ No newline at end of file
diff --git a/memoryos-pypi/requirements.txt b/memoryos-pypi/requirements.txt
new file mode 100644
index 0000000..0c8a8d4
--- /dev/null
+++ b/memoryos-pypi/requirements.txt
@@ -0,0 +1,23 @@
+# MemoryOS Core Dependencies
+# Core scientific computing and ML libraries
+numpy==1.24.*
+sentence-transformers>=2.7.0,<3.0.0  # Updated for Qwen model support
+transformers>=4.51.0  # Required for newer sentence-transformer features
+FlagEmbedding>=1.2.9  # For BGE-M3 model support
+
+faiss-gpu>=1.7.0,<2.0.0
+httpx[socks]
+openai
+# Web framework (for demo)
+flask>=2.0.0,<3.0.0
+
+# Optional utilities
+python-dotenv>=0.19.0,<2.0.0
+
+# Development and testing (optional)
+# pytest>=7.0.0,<8.0.0
+# pytest-asyncio>=0.20.0,<1.0.0
+
+# Additional dependencies for compatibility
+typing-extensions>=4.0.0,<5.0.0
+regex>=2022.1.18
diff --git a/memoryos-pypi/retriever.py b/memoryos-pypi/retriever.py
new file mode 100644
index 0000000..1ea1568
--- /dev/null
+++ b/memoryos-pypi/retriever.py
@@ -0,0 +1,131 @@
+from collections import deque
+import heapq
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from typing import Optional
+
+try:
+    from .utils import get_timestamp, OpenAIClient, run_parallel_tasks
+    from .short_term import ShortTermMemory
+    from .mid_term import MidTermMemory
+    from .long_term import LongTermMemory
+except ImportError:
+    from utils import get_timestamp, OpenAIClient, run_parallel_tasks
+    from short_term import ShortTermMemory
+    from mid_term import MidTermMemory
+    from long_term import LongTermMemory
+# from .updater import Updater # Updater is not directly used by Retriever
+
+class Retriever:
+    def __init__(self,
+                 mid_term_memory: MidTermMemory,
+                 long_term_memory: LongTermMemory,
+                 assistant_long_term_memory: Optional[LongTermMemory] = None,  # Add assistant LTM
+                 # client: OpenAIClient, # Not strictly needed if all LLM calls are within memory modules
+                 queue_capacity=7):  # Default from main_memoybank was 7 for retrieval_queue
+        # Short term memory is usually for direct context, not a primary retrieval source here
+        # self.short_term_memory = short_term_memory
+        self.mid_term_memory = mid_term_memory
+        self.long_term_memory = long_term_memory
+        self.assistant_long_term_memory = assistant_long_term_memory  # Store assistant LTM reference
+        # self.client = client
+        self.retrieval_queue_capacity = queue_capacity
+        # self.retrieval_queue = deque(maxlen=queue_capacity) # This was instance level, but retrieve returns it, so maybe not needed as instance var
+
+    def _retrieve_mid_term_context(self, user_query, segment_similarity_threshold, page_similarity_threshold, top_k_sessions):
+        """Parallel task: retrieve from mid-term memory."""
+        print("Retriever: Searching mid-term memory...")
+        matched_sessions = self.mid_term_memory.search_sessions(
+            query_text=user_query,
+            segment_similarity_threshold=segment_similarity_threshold,
+            page_similarity_threshold=page_similarity_threshold,
+            top_k_sessions=top_k_sessions
+        )
+
+        # Use a heap to get top N pages across all relevant sessions based on their scores
+        top_pages_heap = []
+        page_counter = 0  # Add counter to ensure unique comparison
+        for session_match in matched_sessions:
+            for page_match in session_match.get("matched_pages", []):
+                page_data = page_match["page_data"]
+                page_score = page_match["score"]  # Using the page score directly
+
+                # Add session relevance score to page score or combine them?
+                # For now, using page_score. Could be: page_score * session_match["session_relevance_score"]
+                combined_score = page_score  # Potentially adjust with session_relevance_score
+
+                if len(top_pages_heap) < self.retrieval_queue_capacity:
+                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
+                    page_counter += 1
+                elif combined_score > top_pages_heap[0][0]:  # If current page is better than the worst in heap
+                    heapq.heappop(top_pages_heap)
+                    heapq.heappush(top_pages_heap, (combined_score, page_counter, page_data))
+                    page_counter += 1
+
+        # The min-heap keeps the smallest score at the root; we want the highest
+        # scores first, so sort descending after collecting the top entries.
+        retrieved_pages = [item[2] for item in sorted(top_pages_heap, key=lambda x: x[0], reverse=True)]
+        print(f"Retriever: Mid-term memory recalled {len(retrieved_pages)} pages.")
+        return retrieved_pages
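+
+    # The loop above is the standard bounded min-heap idiom for top-k selection:
+    # the heap root is always the weakest page kept so far, so each candidate
+    # needs only one comparison against top_pages_heap[0][0], and page_counter
+    # acts as a tiebreaker so heapq never has to compare two page dicts when
+    # scores are equal. The same pattern in isolation (illustrative sketch, not
+    # part of this module):
+    #
+    #   def top_k(scored_items, k):
+    #       heap = []
+    #       for i, (score, item) in enumerate(scored_items):
+    #           if len(heap) < k:
+    #               heapq.heappush(heap, (score, i, item))
+    #           elif score > heap[0][0]:
+    #               heapq.heapreplace(heap, (score, i, item))
+    #       return [entry[2] for entry in sorted(heap, reverse=True)]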
+    def _retrieve_user_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
+        """Parallel task: retrieve from the user's long-term knowledge."""
+        print("Retriever: Searching user long-term knowledge...")
+        retrieved_knowledge = self.long_term_memory.search_user_knowledge(
+            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
+        )
+        print(f"Retriever: Long-term user knowledge recalled {len(retrieved_knowledge)} items.")
+        return retrieved_knowledge
+
+    def _retrieve_assistant_knowledge(self, user_query, knowledge_threshold, top_k_knowledge):
+        """Parallel task: retrieve from the assistant's long-term knowledge."""
+        if not self.assistant_long_term_memory:
+            print("Retriever: No assistant long-term memory provided, skipping assistant knowledge retrieval.")
+            return []
+
+        print("Retriever: Searching assistant long-term knowledge...")
+        retrieved_knowledge = self.assistant_long_term_memory.search_assistant_knowledge(
+            user_query, threshold=knowledge_threshold, top_k=top_k_knowledge
+        )
+        print(f"Retriever: Long-term assistant knowledge recalled {len(retrieved_knowledge)} items.")
+        return retrieved_knowledge
+
+    def retrieve_context(self, user_query: str,
+                         user_id: str,                      # Needed for profile, can be used for context filtering if desired
+                         segment_similarity_threshold=0.1,  # From main_memoybank example
+                         page_similarity_threshold=0.1,     # From main_memoybank example
+                         knowledge_threshold=0.01,          # From main_memoybank example
+                         top_k_sessions=5,                  # From MidTermMemory search default
+                         top_k_knowledge=20                 # Default for knowledge search
+                         ):
+        print(f"Retriever: Starting PARALLEL retrieval for query: '{user_query[:50]}...'")
+
+        # Run the three retrieval tasks in parallel
+        tasks = [
+            lambda: self._retrieve_mid_term_context(user_query, segment_similarity_threshold, page_similarity_threshold, top_k_sessions),
+            lambda: self._retrieve_user_knowledge(user_query, knowledge_threshold, top_k_knowledge),
+            lambda: self._retrieve_assistant_knowledge(user_query, knowledge_threshold, top_k_knowledge)
+        ]
+
+        # Execute with a thread pool
+        with ThreadPoolExecutor(max_workers=3) as executor:
+            futures = []
+            for i, task in enumerate(tasks):
+                future = executor.submit(task)
+                futures.append((i, future))
+
+            results = [None] * 3
+            for task_idx, future in futures:
+                try:
+                    results[task_idx] = future.result()
+                except Exception as e:
+                    print(f"Error in retrieval task {task_idx}: 
{e}") + results[task_idx] = [] + + retrieved_mid_term_pages, retrieved_user_knowledge, retrieved_assistant_knowledge = results + + return { + "retrieved_pages": retrieved_mid_term_pages or [], # List of page dicts + "retrieved_user_knowledge": retrieved_user_knowledge or [], # List of knowledge entry dicts + "retrieved_assistant_knowledge": retrieved_assistant_knowledge or [], # List of assistant knowledge entry dicts + "retrieved_at": get_timestamp() + } \ No newline at end of file diff --git a/memoryos-pypi/short_term.py b/memoryos-pypi/short_term.py new file mode 100644 index 0000000..37ffddc --- /dev/null +++ b/memoryos-pypi/short_term.py @@ -0,0 +1,64 @@ +import json +from collections import deque +try: + from .utils import get_timestamp, ensure_directory_exists +except ImportError: + from utils import get_timestamp, ensure_directory_exists + +class ShortTermMemory: + def __init__(self, file_path, max_capacity=10): + self.max_capacity = max_capacity + self.file_path = file_path + ensure_directory_exists(self.file_path) + self.memory = deque(maxlen=max_capacity) + self.load() + + def add_qa_pair(self, qa_pair): + # Ensure timestamp exists, add if not + if 'timestamp' not in qa_pair or not qa_pair['timestamp']: + qa_pair["timestamp"] = get_timestamp() + + self.memory.append(qa_pair) + print(f"ShortTermMemory: Added QA. User: {qa_pair.get('user_input','')[:30]}...") + self.save() + + def get_all(self): + return list(self.memory) + + def is_full(self): + return len(self.memory) >= self.max_capacity # Use >= to be safe + + def pop_oldest(self): + if self.memory: + msg = self.memory.popleft() + print("ShortTermMemory: Evicted oldest QA pair.") + self.save() + return msg + return None + + def save(self): + try: + with open(self.file_path, "w", encoding="utf-8") as f: + json.dump(list(self.memory), f, ensure_ascii=False, indent=2) + except IOError as e: + print(f"Error saving ShortTermMemory to {self.file_path}: {e}") + + def load(self): + try: + with open(self.file_path, "r", encoding="utf-8") as f: + data = json.load(f) + # Ensure items are loaded correctly, especially if file was empty or malformed + if isinstance(data, list): + self.memory = deque(data, maxlen=self.max_capacity) + else: + self.memory = deque(maxlen=self.max_capacity) + print(f"ShortTermMemory: Loaded from {self.file_path}.") + except FileNotFoundError: + self.memory = deque(maxlen=self.max_capacity) + print(f"ShortTermMemory: No history file found at {self.file_path}. Initializing new memory.") + except json.JSONDecodeError: + self.memory = deque(maxlen=self.max_capacity) + print(f"ShortTermMemory: Error decoding JSON from {self.file_path}. Initializing new memory.") + except Exception as e: + self.memory = deque(maxlen=self.max_capacity) + print(f"ShortTermMemory: An unexpected error occurred during load from {self.file_path}: {e}. Initializing new memory.") \ No newline at end of file diff --git a/memoryos-pypi/test.py b/memoryos-pypi/test.py new file mode 100644 index 0000000..7eb7d4f --- /dev/null +++ b/memoryos-pypi/test.py @@ -0,0 +1,93 @@ + +import os +from memoryos import Memoryos + +# --- Basic Configuration --- +USER_ID = "demo_user" +ASSISTANT_ID = "demo_assistant" +API_KEY = "" # Replace with your key +BASE_URL = "" # Optional: if using a custom OpenAI endpoint +DATA_STORAGE_PATH = "" +LLM_MODEL = "gpt-4o-mini" + +def simple_demo(): + print("MemoryOS Simple Demo") + + # 1. 
Initialize MemoryOS + print("Initializing MemoryOS...") + try: + memo = Memoryos( + user_id=USER_ID, + openai_api_key=API_KEY, + openai_base_url=BASE_URL, + data_storage_path=DATA_STORAGE_PATH, + llm_model=LLM_MODEL, + assistant_id=ASSISTANT_ID, + short_term_capacity=7, + mid_term_heat_threshold=1000, + retrieval_queue_capacity=10, + long_term_knowledge_capacity=100, + mid_term_similarity_threshold=0.6, + embedding_model_name="" + ) + print("MemoryOS initialized successfully!\n") + except Exception as e: + print(f"Error: {e}") + return + + # 2. Add some basic memories + print("Adding some memories...") + + memo.add_memory( + user_input="Hi! I'm Tom, I work as a data scientist in San Francisco.", + agent_response="Hello Tom! Nice to meet you. Data science is such an exciting field. What kind of data do you work with?" + ) + memo.add_memory( + user_input="I love hiking on weekends, especially in the mountains.", + agent_response="That sounds wonderful! Do you have a favorite trail or mountain you like to visit?" + ) + memo.add_memory( + user_input="Recently, I've been reading a lot about artificial intelligence.", + agent_response="AI is a fascinating topic! Are you interested in any specific area of AI?" + ) + memo.add_memory( + user_input="My favorite food is sushi, especially salmon nigiri.", + agent_response="Sushi is delicious! Have you ever tried making it at home?" + ) + memo.add_memory( + user_input="I have a golden retriever named Max.", + agent_response="Max must be adorable! How old is he?" + ) + memo.add_memory( + user_input="I traveled to Japan last year and visited Tokyo and Kyoto.", + agent_response="That must have been an amazing experience! What did you enjoy most about Japan?" + ) + memo.add_memory( + user_input="I'm currently learning how to play the guitar.", + agent_response="That's awesome! What songs are you practicing right now?" + ) + memo.add_memory( + user_input="I usually start my day with a cup of black coffee.", + agent_response="Coffee is a great way to kickstart the day! Do you prefer it hot or iced?" + ) + memo.add_memory( + user_input="My favorite movie genre is science fiction.", + agent_response="Sci-fi movies can be so imaginative! Do you have a favorite film?" + ) + memo.add_memory( + user_input="I enjoy painting landscapes in my free time.", + agent_response="Painting is such a creative hobby! Do you use oils, acrylics, or watercolors?" + ) + + + test_query = "What do you remember about my job?" 
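+    # 3. Query the assistant. Per the modules in this PR, get_response() has the
+    #    Retriever search mid-term sessions plus user/assistant long-term
+    #    knowledge in parallel, then folds the recalled pages and the short-term
+    #    history into the LLM prompt. With short_term_capacity=7, the ten
+    #    add_memory() calls above should already have forced at least one
+    #    short-term -> mid-term eviction (see Updater.process_short_term_to_mid_term).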
+    print(f"User: {test_query}")
+
+    response = memo.get_response(
+        query=test_query,
+    )
+
+    print(f"Assistant: {response}")
+
+if __name__ == "__main__":
+    simple_demo()
\ No newline at end of file
diff --git a/memoryos-pypi/updater.py b/memoryos-pypi/updater.py
new file mode 100644
index 0000000..831fe8a
--- /dev/null
+++ b/memoryos-pypi/updater.py
@@ -0,0 +1,239 @@
+try:
+    from .utils import (
+        generate_id, get_timestamp,
+        gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient,
+        run_parallel_tasks, get_embedding, normalize_vector
+    )
+    from .short_term import ShortTermMemory
+    from .mid_term import MidTermMemory
+    from .long_term import LongTermMemory
+except ImportError:
+    from utils import (
+        generate_id, get_timestamp,
+        gpt_generate_multi_summary, check_conversation_continuity, generate_page_meta_info, OpenAIClient,
+        run_parallel_tasks, get_embedding, normalize_vector
+    )
+    from short_term import ShortTermMemory
+    from mid_term import MidTermMemory
+    from long_term import LongTermMemory
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+class Updater:
+    def __init__(self,
+                 short_term_memory: ShortTermMemory,
+                 mid_term_memory: MidTermMemory,
+                 long_term_memory: LongTermMemory,
+                 client: OpenAIClient,
+                 topic_similarity_threshold=0.5,
+                 llm_model="gpt-4o-mini"):
+        self.short_term_memory = short_term_memory
+        self.mid_term_memory = mid_term_memory
+        self.long_term_memory = long_term_memory
+        self.client = client
+        self.topic_similarity_threshold = topic_similarity_threshold
+        self.last_evicted_page_for_continuity = None  # Tracks the actual last page object for continuity checks
+        self.llm_model = llm_model
+
+    def _process_page_embedding_and_keywords(self, page_data):
+        """Generate the embedding for a single page (keywords are supplied by the multi-summary step)."""
+        page_id = page_data.get("page_id", generate_id("page"))
+
+        # Skip recomputation if the page already has an embedding
+        if "page_embedding" in page_data and page_data["page_embedding"]:
+            print(f"Updater: Page {page_id} already has embedding, skipping computation")
+            return page_data
+
+        # Only compute the embedding here; keywords come from the multi-summary step
+        if not ("page_embedding" in page_data and page_data["page_embedding"]):
+            full_text = f"User: {page_data.get('user_input','')} Assistant: {page_data.get('agent_response','')}"
+            try:
+                embedding = self._get_embedding_for_page(full_text)
+                if embedding is not None:
+                    # normalize_vector is imported at module level above, so this
+                    # also works when the modules are run as plain scripts
+                    page_data["page_embedding"] = normalize_vector(embedding).tolist()
+                    print(f"Updater: Generated embedding for page {page_id}")
+            except Exception as e:
+                print(f"Error generating embedding for page {page_id}: {e}")
+
+        # Initialize an empty keyword list (to be filled from the multi-summary keywords)
+        if "page_keywords" not in page_data:
+            page_data["page_keywords"] = []
+
+        return page_data
+
+    def _get_embedding_for_page(self, text):
+        """Helper to compute a page embedding."""
+        return get_embedding(text)
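+
+    # Note on the keyword pipeline: pages deliberately leave page_keywords empty
+    # here. process_short_term_to_mid_term() later attaches the keywords produced
+    # by gpt_generate_multi_summary, so no per-page LLM keyword call is needed.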
+ """ + # Go backward + q = [start_page_id] + visited = {start_page_id} + + head = 0 + while head < len(q): + current_page_id = q[head] + head += 1 + page = self.mid_term_memory.get_page_by_id(current_page_id) + if page: + page["meta_info"] = new_meta_info + # Check previous page + prev_id = page.get("pre_page") + if prev_id and prev_id not in visited: + q.append(prev_id) + visited.add(prev_id) + # Check next page + next_id = page.get("next_page") + if next_id and next_id not in visited: + q.append(next_id) + visited.add(next_id) + if q: # If any pages were updated + self.mid_term_memory.save() # Save mid-term memory after updates + + def process_short_term_to_mid_term(self): + evicted_qas = [] + while self.short_term_memory.is_full(): + qa = self.short_term_memory.pop_oldest() + if qa and qa.get("user_input") and qa.get("agent_response"): + evicted_qas.append(qa) + + if not evicted_qas: + print("Updater: No QAs evicted from short-term memory.") + return + + print(f"Updater: Processing {len(evicted_qas)} QAs from short-term to mid-term.") + + # 1. Create page structures and handle continuity within the evicted batch + current_batch_pages = [] + temp_last_page_in_batch = self.last_evicted_page_for_continuity # Carry over from previous batch if any + + for qa_pair in evicted_qas: + current_page_obj = { + "page_id": generate_id("page"), + "user_input": qa_pair.get("user_input", ""), + "agent_response": qa_pair.get("agent_response", ""), + "timestamp": qa_pair.get("timestamp", get_timestamp()), + "preloaded": False, # Default for new pages from short-term + "analyzed": False, # Default for new pages from short-term + "pre_page": None, + "next_page": None, + "meta_info": None + } + + is_continuous = check_conversation_continuity(temp_last_page_in_batch, current_page_obj, self.client, model=self.llm_model) + + if is_continuous and temp_last_page_in_batch: + current_page_obj["pre_page"] = temp_last_page_in_batch["page_id"] + # The actual next_page for temp_last_page_in_batch will be set when it's stored in mid-term + # or if it's already there, it needs an update. This linking is tricky. + # For now, we establish the link from current to previous. + # MidTermMemory's update_page_connections can fix the other side if pages are already there. + + # Meta info generation based on continuity + last_meta = temp_last_page_in_batch.get("meta_info") + new_meta = generate_page_meta_info(last_meta, current_page_obj, self.client, model=self.llm_model) + current_page_obj["meta_info"] = new_meta + # If temp_last_page_in_batch was part of a chain, its meta_info and subsequent ones should update. + # This implies that meta_info should perhaps be updated more globally or propagated. + # For now, new_meta applies to current_page_obj and potentially its chain. + # We can call _update_linked_pages_meta_info if temp_last_page_in_batch is in mid-term already. 
+ if temp_last_page_in_batch.get("page_id") and self.mid_term_memory.get_page_by_id(temp_last_page_in_batch["page_id"]): + self._update_linked_pages_meta_info(temp_last_page_in_batch["page_id"], new_meta) + else: + # Start of a new chain or no previous page + current_page_obj["meta_info"] = generate_page_meta_info(None, current_page_obj, self.client, model=self.llm_model) + + current_batch_pages.append(current_page_obj) + temp_last_page_in_batch = current_page_obj # Update for the next iteration in this batch + + # Update the global last evicted page for the next run of this method + if current_batch_pages: + self.last_evicted_page_for_continuity = current_batch_pages[-1] + + # 2. Consolidate text from current_batch_pages for multi-summary + if not current_batch_pages: + return + + input_text_for_summary = "\n".join([ + f"User: {p.get('user_input','')}\nAssistant: {p.get('agent_response','')}" + for p in current_batch_pages + ]) + + print("Updater: Generating multi-topic summary for the evicted batch...") + multi_summary_result = gpt_generate_multi_summary(input_text_for_summary, self.client, model=self.llm_model) + + # 3. Insert pages into MidTermMemory based on summaries + if multi_summary_result and multi_summary_result.get("summaries"): + for summary_item in multi_summary_result["summaries"]: + theme_summary = summary_item.get("content", "General summary of recent interactions.") + theme_keywords = summary_item.get("keywords", []) + print(f"Updater: Processing theme '{summary_item.get('theme')}' for mid-term insertion.") + + # Pass the already processed pages (with IDs, embeddings to be added by MidTermMemory if not present) + self.mid_term_memory.insert_pages_into_session( + summary_for_new_pages=theme_summary, + keywords_for_new_pages=theme_keywords, + pages_to_insert=current_batch_pages, # These pages now have pre_page, next_page, meta_info set up + similarity_threshold=self.topic_similarity_threshold + ) + else: + # Fallback: if no summaries, add as one session or handle as a single block + print("Updater: No specific themes from multi-summary. Adding batch as a general session.") + fallback_summary = "General conversation segment from short-term memory." + fallback_keywords = [] # Use empty keywords since multi-summary failed + self.mid_term_memory.insert_pages_into_session( + summary_for_new_pages=fallback_summary, + keywords_for_new_pages=list(fallback_keywords), + pages_to_insert=current_batch_pages, + similarity_threshold=self.topic_similarity_threshold + ) + + # After pages are in mid-term, ensure their connections are doubly linked if needed. + # MidTermMemory.insert_pages_into_session should ideally handle this internally + # or we might need a separate pass to solidify connections after all insertions. + for page in current_batch_pages: + if page.get("pre_page"): + self.mid_term_memory.update_page_connections(page["pre_page"], page["page_id"]) + if page.get("next_page"): + self.mid_term_memory.update_page_connections(page["page_id"], page["next_page"]) # This seems redundant if next is set by prior + if current_batch_pages: # Save if any pages were processed + self.mid_term_memory.save() + + def update_long_term_from_analysis(self, user_id, profile_analysis_result): + """ + Updates long-term memory based on the results of a personality/knowledge analysis. + profile_analysis_result is expected to be a dict with keys like "profile", "private", "assistant_knowledge". 
+ """ + if not profile_analysis_result: + print("Updater: No analysis result provided for long-term update.") + return + + new_profile_text = profile_analysis_result.get("profile") + if new_profile_text and new_profile_text.lower() != "none": + print(f"Updater: Updating user profile for {user_id} in LongTermMemory.") + # 直接使用新的分析结果作为完整画像,因为它应该已经是集成后的结果 + self.long_term_memory.update_user_profile(user_id, new_profile_text, merge=False) + + user_private_knowledge = profile_analysis_result.get("private") + if user_private_knowledge and user_private_knowledge.lower() != "none": + print(f"Updater: Adding user private knowledge for {user_id} to LongTermMemory.") + # Split if multiple lines, assuming each line is a distinct piece of knowledge + for line in user_private_knowledge.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.long_term_memory.add_user_knowledge(line.strip()) + + assistant_knowledge_text = profile_analysis_result.get("assistant_knowledge") + if assistant_knowledge_text and assistant_knowledge_text.lower() != "none": + print("Updater: Adding assistant knowledge to LongTermMemory.") + for line in assistant_knowledge_text.split('\n'): + if line.strip() and line.strip().lower() not in ["none", "- none", "- none."]: + self.long_term_memory.add_assistant_knowledge(line.strip()) + + # LongTermMemory.save() is called by its add/update methods \ No newline at end of file diff --git a/memoryos-pypi/utils.py b/memoryos-pypi/utils.py new file mode 100644 index 0000000..40d95ff --- /dev/null +++ b/memoryos-pypi/utils.py @@ -0,0 +1,386 @@ +import time +import uuid +import openai +import numpy as np +from sentence_transformers import SentenceTransformer +import json +import os +import inspect +from functools import wraps +try: + from . import prompts # 尝试相对导入 +except ImportError: + import prompts # 回退到绝对导入 +from openai import OpenAI +from concurrent.futures import ThreadPoolExecutor, as_completed +import threading + +def clean_reasoning_model_output(text): + """ + 清理推理模型输出中的标签 + 适配推理模型(如o1系列)的输出格式 + """ + if not text: + return text + + import re + # 移除...标签及其内容 + cleaned_text = re.sub(r'.*?', '', text, flags=re.DOTALL) + # 清理可能产生的多余空白行 + cleaned_text = re.sub(r'\n\s*\n\s*\n', '\n\n', cleaned_text) + # 移除开头和结尾的空白 + cleaned_text = cleaned_text.strip() + + return cleaned_text + +# ---- OpenAI Client ---- +class OpenAIClient: + def __init__(self, api_key, base_url=None, max_workers=5): + self.api_key = api_key + self.base_url = base_url if base_url else "https://api.openai.com/v1" + # The openai library looks for OPENAI_API_KEY and OPENAI_BASE_URL env vars by default + # or they can be passed directly to the client. + # For simplicity and explicit control, we'll pass them to the client constructor. + self.client = OpenAI(api_key=self.api_key, base_url=self.base_url) + self.executor = ThreadPoolExecutor(max_workers=max_workers) + self._lock = threading.Lock() + + def chat_completion(self, model, messages, temperature=0.7, max_tokens=2000): + print(f"Calling OpenAI API. Model: {model}") + try: + response = self.client.chat.completions.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens + ) + raw_content = response.choices[0].message.content.strip() + # 自动清理推理模型的标签 + cleaned_content = clean_reasoning_model_output(raw_content) + return cleaned_content + except Exception as e: + print(f"Error calling OpenAI API: {e}") + # Fallback or error handling + return "Error: Could not get response from LLM." 
+
+    def chat_completion_async(self, model, messages, temperature=0.7, max_tokens=2000):
+        """Asynchronous version of chat_completion."""
+        return self.executor.submit(self.chat_completion, model, messages, temperature, max_tokens)
+
+    def batch_chat_completion(self, requests):
+        """
+        Process multiple LLM requests in parallel.
+        requests: List of dict with keys: model, messages, temperature, max_tokens
+        """
+        futures = []
+        for req in requests:
+            future = self.chat_completion_async(
+                model=req.get("model", "gpt-4o-mini"),
+                messages=req["messages"],
+                temperature=req.get("temperature", 0.7),
+                max_tokens=req.get("max_tokens", 2000)
+            )
+            futures.append(future)
+
+        results = []
+        # NOTE: as_completed yields in completion order, so results may not
+        # match the order of the incoming requests.
+        for future in as_completed(futures):
+            try:
+                result = future.result()
+                results.append(result)
+            except Exception as e:
+                print(f"Error in batch completion: {e}")
+                results.append("Error: Could not get response from LLM.")
+
+        return results
+
+    def shutdown(self):
+        """Shut down the thread pool."""
+        self.executor.shutdown(wait=True)
+
+# ---- Parallel Processing Utilities ----
+def run_parallel_tasks(tasks, max_workers=3):
+    """
+    Run a list of tasks in parallel.
+    tasks: List of callable functions
+    """
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(task) for task in tasks]
+        results = []
+        for future in as_completed(futures):
+            try:
+                result = future.result()
+                results.append(result)
+            except Exception as e:
+                print(f"Error in parallel task: {e}")
+                results.append(None)
+    return results
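+
+# Illustrative only (not part of the library API): a minimal sketch of how
+# run_parallel_tasks is meant to be called. Each task is a zero-argument
+# callable; results come back in completion order, with None standing in for
+# any task that raised.
+def _example_run_parallel_tasks():
+    tasks = [
+        lambda: sum(range(1000)),
+        lambda: "hello".upper(),
+    ]
+    return run_parallel_tasks(tasks, max_workers=2)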
+# ---- Basic Utilities ----
+def get_timestamp():
+    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
+def generate_id(prefix="id"):
+    return f"{prefix}_{uuid.uuid4().hex[:8]}"
+
+def ensure_directory_exists(path):
+    directory = os.path.dirname(path)
+    if directory:  # Guard against bare filenames, where dirname is "" and makedirs would fail
+        os.makedirs(directory, exist_ok=True)
+
+# ---- Embedding Utilities ----
+_model_cache = {}
+_embedding_cache = {}  # In-memory embedding cache
+
+def _get_valid_kwargs(func, kwargs):
+    """Helper to filter kwargs for a given function's signature."""
+    try:
+        sig = inspect.signature(func)
+        param_keys = set(sig.parameters.keys())
+        return {k: v for k, v in kwargs.items() if k in param_keys}
+    except (ValueError, TypeError):
+        # Fallback for functions/methods where signature inspection is not straightforward
+        return kwargs
+
+def get_embedding(text, model_name="all-MiniLM-L6-v2", use_cache=True, **kwargs):
+    """
+    Compute the embedding vector for a text.
+    Supports several mainstream models and adapts to each library's calling convention:
+    - SentenceTransformer models: e.g. 'all-MiniLM-L6-v2', 'Qwen/Qwen3-Embedding-0.6B'
+    - FlagEmbedding models: e.g. 'BAAI/bge-m3'
+
+    :param text: Input text.
+    :param model_name: Model name on Hugging Face.
+    :param use_cache: Whether to use the in-memory cache.
+    :param kwargs: Extra arguments passed to the model constructor or encode method.
+                   - for Qwen: `model_kwargs`, `tokenizer_kwargs`, `prompt_name="query"`
+                   - for BGE-M3: `use_fp16=True`, `max_length=8192`
+    :return: The text's embedding vector (numpy array).
+    """
+    model_config_key = json.dumps({"model_name": model_name, **kwargs}, sort_keys=True)
+
+    if use_cache:
+        cache_key = f"{model_config_key}::{hash(text)}"
+        if cache_key in _embedding_cache:
+            return _embedding_cache[cache_key]
+
+    # --- Model Loading ---
+    model_init_key = json.dumps({"model_name": model_name, **{k: v for k, v in kwargs.items() if k not in ['batch_size', 'max_length']}}, sort_keys=True)
+    if model_init_key not in _model_cache:
+        print(f"Loading model: {model_name}...")
+        if 'bge-m3' in model_name.lower():
+            try:
+                from FlagEmbedding import BGEM3FlagModel
+                init_kwargs = _get_valid_kwargs(BGEM3FlagModel.__init__, kwargs)
+                print(f"-> Using BGEM3FlagModel with init kwargs: {init_kwargs}")
+                _model_cache[model_init_key] = BGEM3FlagModel(model_name, **init_kwargs)
+            except ImportError:
+                raise ImportError("Please install FlagEmbedding: 'pip install -U FlagEmbedding' to use bge-m3 model.")
+        else:  # Default handler for SentenceTransformer-based models (like Qwen, all-MiniLM, etc.)
+            try:
+                from sentence_transformers import SentenceTransformer
+                init_kwargs = _get_valid_kwargs(SentenceTransformer.__init__, kwargs)
+                print(f"-> Using SentenceTransformer with init kwargs: {init_kwargs}")
+                _model_cache[model_init_key] = SentenceTransformer(model_name, **init_kwargs)
+            except ImportError:
+                raise ImportError("Please install sentence-transformers: 'pip install -U sentence-transformers' to use this model.")
+
+    model = _model_cache[model_init_key]
+
+    # --- Encoding ---
+    embedding = None
+    if 'bge-m3' in model_name.lower():
+        encode_kwargs = _get_valid_kwargs(model.encode, kwargs)
+        print(f"-> Encoding with BGEM3FlagModel using kwargs: {encode_kwargs}")
+        result = model.encode([text], **encode_kwargs)
+        embedding = result['dense_vecs'][0]
+    else:  # Default to SentenceTransformer-based models
+        encode_kwargs = _get_valid_kwargs(model.encode, kwargs)
+        print(f"-> Encoding with SentenceTransformer using kwargs: {encode_kwargs}")
+        embedding = model.encode([text], **encode_kwargs)[0]
+
+    if use_cache:
+        cache_key = f"{model_config_key}::{hash(text)}"
+        _embedding_cache[cache_key] = embedding
+        if len(_embedding_cache) > 10000:
+            keys_to_remove = list(_embedding_cache.keys())[:1000]
+            for key in keys_to_remove:
+                try:
+                    del _embedding_cache[key]
+                except KeyError:
+                    pass
+            print("Cleaned embedding cache to prevent memory overflow")
+
+    return embedding
+
+
+def clear_embedding_cache():
+    """Clear the embedding cache."""
+    global _embedding_cache
+    _embedding_cache.clear()
+    print("Embedding cache cleared")
+
+def normalize_vector(vec):
+    vec = np.array(vec, dtype=np.float32)
+    norm = np.linalg.norm(vec)
+    if norm == 0:
+        return vec
+    return vec / norm
+
+# ---- Time Decay Function ----
+def compute_time_decay(event_timestamp_str, current_timestamp_str, tau_hours=24):
+    from datetime import datetime
+    fmt = "%Y-%m-%d %H:%M:%S"
+    try:
+        t_event = datetime.strptime(event_timestamp_str, fmt)
+        t_current = datetime.strptime(current_timestamp_str, fmt)
+        delta_hours = (t_current - t_event).total_seconds() / 3600.0
+        return np.exp(-delta_hours / tau_hours)
+    except ValueError:  # Handle cases where timestamp might be invalid
+        return 0.1  # Default low recency
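+
+# Worked example of the decay above: with the default tau_hours=24, an event
+# 24 hours old is weighted exp(-1) ~= 0.37 and one 72 hours old exp(-3) ~= 0.05,
+# so recency falls off smoothly rather than with a hard cutoff.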
+# ---- LLM-based Utility Functions ----
+
+def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
+    dialog_text = "\n".join([f"User: {d.get('user_input','')} Assistant: {d.get('agent_response','')}" for d in dialogs])
+    messages = [
+        {"role": "system", "content": prompts.SUMMARIZE_DIALOGS_SYSTEM_PROMPT},
+        {"role": "user", "content": prompts.SUMMARIZE_DIALOGS_USER_PROMPT.format(dialog_text=dialog_text)}
+    ]
+    print("Calling LLM to generate topic summary...")
+    return client.chat_completion(model=model, messages=messages)
+
+def gpt_generate_multi_summary(text, client: OpenAIClient, model="gpt-4o-mini"):
+    messages = [
+        {"role": "system", "content": prompts.MULTI_SUMMARY_SYSTEM_PROMPT},
+        {"role": "user", "content": prompts.MULTI_SUMMARY_USER_PROMPT.format(text=text)}
+    ]
+    print("Calling LLM to generate multi-topic summary...")
+    response_text = client.chat_completion(model=model, messages=messages)
+    try:
+        summaries = json.loads(response_text)
+    except json.JSONDecodeError:
+        print(f"Warning: Could not parse multi-summary JSON: {response_text}")
+        summaries = []  # Return an empty list as the default structure
+    return {"input": text, "summaries": summaries}
+
+
+def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", existing_user_profile="None"):
+    """
+    Analyze and update the user personality profile from dialogs.
+    Combines the existing profile with the new conversation and directly outputs the updated, complete profile.
+    """
+    conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
+    messages = [
+        {"role": "system", "content": prompts.PERSONALITY_ANALYSIS_SYSTEM_PROMPT},
+        {"role": "user", "content": prompts.PERSONALITY_ANALYSIS_USER_PROMPT.format(
+            conversation=conversation,
+            existing_user_profile=existing_user_profile
+        )}
+    ]
+    print("Calling LLM for user profile analysis and update...")
+    result_text = client.chat_completion(model=model, messages=messages)
+    return result_text.strip() if result_text else "None"
+
+
+def gpt_knowledge_extraction(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
+    """Extract user private data and assistant knowledge from dialogs"""
+    conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
+    messages = [
+        {"role": "system", "content": prompts.KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT},
+        {"role": "user", "content": prompts.KNOWLEDGE_EXTRACTION_USER_PROMPT.format(
+            conversation=conversation
+        )}
+    ]
+    print("Calling LLM for knowledge extraction...")
+    result_text = client.chat_completion(model=model, messages=messages)
+
+    private_data = "None"
+    assistant_knowledge = "None"
+
+    try:
+        if "【User Private Data】" in result_text:
+            private_data_start = result_text.find("【User Private Data】") + len("【User Private Data】")
+            if "【Assistant Knowledge】" in result_text:
+                private_data_end = result_text.find("【Assistant Knowledge】")
+                private_data = result_text[private_data_start:private_data_end].strip()
+
+                assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】")
+                assistant_knowledge = result_text[assistant_knowledge_start:].strip()
+            else:
+                private_data = result_text[private_data_start:].strip()
+        elif "【Assistant Knowledge】" in result_text:
+            assistant_knowledge_start = result_text.find("【Assistant Knowledge】") + len("【Assistant Knowledge】")
+            assistant_knowledge = result_text[assistant_knowledge_start:].strip()
+
+    except Exception as e:
+        print(f"Error parsing knowledge extraction: {e}. Raw result: {result_text}")
+
+    return {
+        "private": private_data if private_data else "None",
+        "assistant_knowledge": assistant_knowledge if assistant_knowledge else "None"
+    }
+
+
+# Keep the old function for backward compatibility, but mark as deprecated
+def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini", known_user_traits="None"):
+    """
+    DEPRECATED: Use gpt_user_profile_analysis and gpt_knowledge_extraction instead.
+    This function is kept for backward compatibility only.
+ """ + # Call the new functions + profile = gpt_user_profile_analysis(dialogs, client, model, known_user_traits) + knowledge_data = gpt_knowledge_extraction(dialogs, client, model) + + return { + "profile": profile, + "private": knowledge_data["private"], + "assistant_knowledge": knowledge_data["assistant_knowledge"] + } + + +def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.UPDATE_PROFILE_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.UPDATE_PROFILE_USER_PROMPT.format(old_profile=old_profile, new_analysis=new_analysis)} + ] + print("Calling LLM to update user profile...") + return client.chat_completion(model=model, messages=messages) + +def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"): + messages = [ + {"role": "system", "content": prompts.EXTRACT_THEME_SYSTEM_PROMPT}, + {"role": "user", "content": prompts.EXTRACT_THEME_USER_PROMPT.format(answer_text=answer_text)} + ] + print("Calling LLM to extract theme...") + return client.chat_completion(model=model, messages=messages) + + + +# ---- Functions from dynamic_update.py (to be used by Updater class) ---- +def check_conversation_continuity(previous_page, current_page, client: OpenAIClient, model="gpt-4o-mini"): + prev_user = previous_page.get("user_input", "") if previous_page else "" + prev_agent = previous_page.get("agent_response", "") if previous_page else "" + + user_prompt = prompts.CONTINUITY_CHECK_USER_PROMPT.format( + prev_user=prev_user, + prev_agent=prev_agent, + curr_user=current_page.get("user_input", ""), + curr_agent=current_page.get("agent_response", "") + ) + messages = [ + {"role": "system", "content": prompts.CONTINUITY_CHECK_SYSTEM_PROMPT}, + {"role": "user", "content": user_prompt} + ] + response = client.chat_completion(model=model, messages=messages, temperature=0.0, max_tokens=10) + return response.strip().lower() == "true" + +def generate_page_meta_info(last_page_meta, current_page, client: OpenAIClient, model="gpt-4o-mini"): + current_conversation = f"User: {current_page.get('user_input', '')}\nAssistant: {current_page.get('agent_response', '')}" + user_prompt = prompts.META_INFO_USER_PROMPT.format( + last_meta=last_page_meta if last_page_meta else "None", + new_dialogue=current_conversation + ) + messages = [ + {"role": "system", "content": prompts.META_INFO_SYSTEM_PROMPT}, + {"role": "user", "content": user_prompt} + ] + return client.chat_completion(model=model, messages=messages, temperature=0.3, max_tokens=100).strip() \ No newline at end of file