56 changes: 56 additions & 0 deletions examples/supported_llms/openai_custom_example.py
@@ -0,0 +1,56 @@
import os

import dotenv
from openai import OpenAI

from memori import Memori

# Load environment variables from .env file
dotenv.load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
model = os.getenv("OPENAI_MODEL", "gpt-4")

client = OpenAI(api_key=api_key, base_url=base_url)

print("Initializing Memori with OpenAI...")
openai_memory = Memori(
database_connect="sqlite:///openai_custom_demo.db",
conscious_ingest=True,
auto_ingest=True,
verbose=True,
api_key=api_key,
base_url=base_url,
model=model,
)

print("Enabling memory tracking...")
openai_memory.enable()

print(f"Memori OpenAI Example - Chat with {model} while memory is being tracked")
print("Type 'exit' or press Ctrl+C to quit")
print("-" * 50)

while True:
try:
user_input = input("User: ")
if not user_input.strip():
continue

if user_input.lower() == "exit":
print("Goodbye!")
break

print("Processing your message with memory tracking...")
response = client.chat.completions.create(
model=model, messages=[{"role": "user", "content": user_input}]
)
print(f"AI: {response.choices[0].message.content}")
print() # Add blank line for readability
except (EOFError, KeyboardInterrupt):
print("\nExiting...")
break
except Exception as e:
print(f"Error: {e}")
continue
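Since the example reads OPENAI_API_KEY, OPENAI_BASE_URL, and OPENAI_MODEL from the environment, the same script can target any OpenAI-compatible endpoint. A minimal sketch of setting those variables (the key, URL, and model name below are placeholders, not tested values):

import os

# Placeholders for illustration only; point these at your own deployment
# before running the example script above.
os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")
os.environ.setdefault("OPENAI_BASE_URL", "http://localhost:8000/v1")  # any OpenAI-compatible server
os.environ.setdefault("OPENAI_MODEL", "my-local-model")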
4 changes: 4 additions & 0 deletions memori/agents/memory_agent.py
@@ -204,6 +204,9 @@ async def process_conversation_async(
"content": f"Process this conversation for enhanced memory storage:\n\n{conversation_text}\n{context_info}",
},
],
metadata=[
"INTERNAL_MEMORY_PROCESSING"
], # Internal metadata tag
response_format=ProcessedLongTermMemory,
temperature=0.1, # Low temperature for consistent processing
)
@@ -417,6 +420,7 @@ async def _process_with_fallback_parsing(
"content": f"Process this conversation for enhanced memory storage:\n\n{conversation_text}\n{context_info}",
},
],
metadata=["INTERNAL_MEMORY_PROCESSING"], # Internal metadata tag
temperature=0.1, # Low temperature for consistent processing
max_tokens=2000, # Ensure enough tokens for full response
)
4 changes: 4 additions & 0 deletions memori/agents/retrieval_agent.py
@@ -149,6 +149,9 @@ def plan_search(self, query: str, context: str | None = None) -> MemorySearchQuery:
"content": prompt,
},
],
metadata=[
"INTERNAL_MEMORY_PROCESSING"
], # Internal metadata tag
response_format=MemorySearchQuery,
temperature=0.1,
)
@@ -656,6 +659,7 @@ def _plan_search_with_fallback_parsing(self, query: str) -> MemorySearchQuery:
"content": prompt,
},
],
metadata=["INTERNAL_MEMORY_PROCESSING"], # Internal metadata tag
temperature=0.1,
max_tokens=1000, # Ensure enough tokens for full response
)
39 changes: 12 additions & 27 deletions memori/integrations/openai_integration.py
@@ -246,33 +246,18 @@ def _inject_context_for_enabled_instances(cls, options, client_type):
def _is_internal_agent_call(cls, json_data):
"""Check if this is an internal agent processing call that should not be recorded."""
try:
-            messages = json_data.get("messages", [])
-            for message in messages:
-                content = message.get("content", "")
-                if isinstance(content, str):
-                    # Check for specific internal agent processing patterns
-                    # Made patterns more specific to avoid false positives
-                    internal_patterns = [
-                        "Process this conversation for enhanced memory storage:",
-                        "Enhanced memory processing:",
-                        "Memory classification:",
-                        "Search for relevant memories:",
-                        "Analyze conversation for:",
-                        "Extract entities from:",
-                        "Categorize the following conversation:",
-                        # More specific patterns to avoid blocking legitimate conversations
-                        "INTERNAL_MEMORY_PROCESSING:",
-                        "AGENT_PROCESSING_MODE:",
-                        "MEMORY_AGENT_TASK:",
-                    ]
-
-                    # Only flag as internal if it matches specific patterns AND has no user role
-                    for pattern in internal_patterns:
-                        if pattern in content:
-                            # Double-check: if this is a user message, don't filter it
-                            if message.get("role") == "user":
-                                continue
-                            return True
+            openai_metadata = json_data.get("metadata", [])
+
+            # Check for specific internal agent metadata flags
+            if isinstance(openai_metadata, list):
+                internal_metadata = [
+                    "INTERNAL_MEMORY_PROCESSING",  # used in memory agent and retrieval agent
+                    "AGENT_PROCESSING_MODE",
+                    "MEMORY_AGENT_TASK",
+                ]
+                for internal in internal_metadata:
+                    if internal in openai_metadata:
+                        return True

return False
