Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
53 changes: 0 additions & 53 deletions examples/basic_modules/nebular_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,56 +52,6 @@ def embed_memory_item(memory: str) -> list[float]:
return embedding_list


def example_multi_db(db_name: str = "paper"):
    """Demonstrate multi-DB (physical isolation) usage of the Nebula graph store.

    Builds a factory config, instantiates the graph store, clears it, and
    inserts a single topic node.

    Args:
        db_name: Name of the dedicated graph space to use for this tenant.
    """
    # Step 1: Build factory config.
    # NEBULAR_HOSTS is expected to hold a JSON value (e.g. '["host1", "host2"]').
    # The default must itself be valid JSON — a bare "localhost" would make
    # json.loads raise JSONDecodeError whenever the env var is unset.
    hosts_raw = os.getenv("NEBULAR_HOSTS", '["localhost"]')
    try:
        hosts = json.loads(hosts_raw)
    except json.JSONDecodeError:
        # Tolerate a plain host string such as "localhost" in the env var.
        hosts = hosts_raw
    config = GraphDBConfigFactory(
        backend="nebular",
        config={
            "uri": hosts,
            "user": os.getenv("NEBULAR_USER", "root"),
            "password": os.getenv("NEBULAR_PASSWORD", "xxxxxx"),
            "space": db_name,
            "use_multi_db": True,
            "auto_create": True,
            "embedding_dimension": embedder_dimension,
        },
    )

    # Step 2: Instantiate the graph store (starts from an empty graph).
    graph = GraphStoreFactory.from_config(config)
    graph.clear()

    # Step 3: Create topic node with full metadata and a precomputed embedding.
    topic = TextualMemoryItem(
        memory="This research addresses long-term multi-UAV navigation for energy-efficient communication coverage.",
        metadata=TreeNodeTextualMemoryMetadata(
            memory_type="LongTermMemory",
            key="Multi-UAV Long-Term Coverage",
            hierarchy_level="topic",
            type="fact",
            memory_time="2024-01-01",
            source="file",
            sources=["paper://multi-uav-coverage/intro"],
            status="activated",
            confidence=95.0,
            tags=["UAV", "coverage", "multi-agent"],
            entities=["UAV", "coverage", "navigation"],
            visibility="public",
            updated_at=datetime.now().isoformat(),
            embedding=embed_memory_item(
                "This research addresses long-term "
                "multi-UAV navigation for "
                "energy-efficient communication "
                "coverage."
            ),
        ),
    )

    # Persist the node; exclude_none keeps the stored payload compact.
    graph.add_node(
        id=topic.id, memory=topic.memory, metadata=topic.metadata.model_dump(exclude_none=True)
    )


def example_shared_db(db_name: str = "shared-traval-group"):
"""
Example: Single(Shared)-DB multi-tenant (logical isolation)
Expand Down Expand Up @@ -404,9 +354,6 @@ def example_complex_shared_db(db_name: str = "shared-traval-group-complex"):


if __name__ == "__main__":
print("\n=== Example: Multi-DB ===")
example_multi_db(db_name="paper-new")

print("\n=== Example: Single-DB ===")
example_shared_db(db_name="shared_traval_group-new")

Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
##############################################################################

name = "MemoryOS"
version = "1.0.1"
version = "1.1.1"
description = "Intelligence Begins with Memory"
license = {text = "Apache-2.0"}
readme = "README.md"
Expand Down
2 changes: 1 addition & 1 deletion src/memos/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
__version__ = "1.0.1"
__version__ = "1.1.1"

from memos.configs.mem_cube import GeneralMemCubeConfig
from memos.configs.mem_os import MOSConfig
Expand Down
3 changes: 3 additions & 0 deletions src/memos/api/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ def get_message(
)
response.raise_for_status()
response_data = response.json()

return MemOSGetMessagesResponse(**response_data)
except Exception as e:
logger.error(f"Failed to get messages (retry {retry + 1}/3): {e}")
Expand All @@ -74,6 +75,7 @@ def add_message(
)
response.raise_for_status()
response_data = response.json()

return MemOSAddResponse(**response_data)
except Exception as e:
logger.error(f"Failed to add memory (retry {retry + 1}/3): {e}")
Expand Down Expand Up @@ -102,6 +104,7 @@ def search_memory(
)
response.raise_for_status()
response_data = response.json()

return MemOSSearchResponse(**response_data)
except Exception as e:
logger.error(f"Failed to search memory (retry {retry + 1}/3): {e}")
Expand Down
12 changes: 12 additions & 0 deletions src/memos/api/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -530,6 +530,13 @@ def create_user_config(user_name: str, user_id: str) -> tuple[MOSConfig, General
"embedder": APIConfig.get_embedder_config(),
"internet_retriever": internet_config,
"reranker": APIConfig.get_reranker_config(),
"reorganize": os.getenv("MOS_ENABLE_REORGANIZE", "false").lower()
== "true",
"memory_size": {
"WorkingMemory": os.getenv("NEBULAR_WORKING_MEMORY", 20),
"LongTermMemory": os.getenv("NEBULAR_LONGTERM_MEMORY", 1e6),
"UserMemory": os.getenv("NEBULAR_USER_MEMORY", 1e6),
},
},
},
"act_mem": {}
Expand Down Expand Up @@ -587,6 +594,11 @@ def get_default_cube_config() -> GeneralMemCubeConfig | None:
"reorganize": os.getenv("MOS_ENABLE_REORGANIZE", "false").lower()
== "true",
"internet_retriever": internet_config,
"memory_size": {
"WorkingMemory": os.getenv("NEBULAR_WORKING_MEMORY", 20),
"LongTermMemory": os.getenv("NEBULAR_LONGTERM_MEMORY", 1e6),
"UserMemory": os.getenv("NEBULAR_USER_MEMORY", 1e6),
},
},
},
"act_mem": {}
Expand Down
2 changes: 1 addition & 1 deletion src/memos/api/product_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ class GetMessagesData(BaseModel):
"""Data model for get messages response based on actual API."""

message_detail_list: list[MessageDetail] = Field(
default_factory=list, alias="memory_detail_list", description="List of message details"
default_factory=list, alias="message_detail_list", description="List of message details"
)


Expand Down
52 changes: 48 additions & 4 deletions src/memos/graph_dbs/nebular.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,19 @@ def _get_or_create_shared_client(cls, cfg: NebulaGraphDBConfig) -> tuple[str, "N
client = cls._CLIENT_CACHE.get(key)
if client is None:
# Connection setting

tmp_client = NebulaClient(
hosts=cfg.uri,
username=cfg.user,
password=cfg.password,
session_config=SessionConfig(graph=None),
session_pool_config=SessionPoolConfig(size=1, wait_timeout=3000),
)
try:
cls._ensure_space_exists(tmp_client, cfg)
finally:
tmp_client.close()

conn_conf: ConnectionConfig | None = getattr(cfg, "conn_config", None)
if conn_conf is None:
conn_conf = ConnectionConfig.from_defults(
Expand Down Expand Up @@ -317,6 +330,7 @@ def __init__(self, config: NebulaGraphDBConfig):
}
"""

assert config.use_multi_db is False, "Multi-DB MODE IS NOT SUPPORTED"
self.config = config
self.db_name = config.space
self.user_name = config.user_name
Expand Down Expand Up @@ -349,7 +363,7 @@ def __init__(self, config: NebulaGraphDBConfig):
if (str(self.embedding_dimension) != str(self.default_memory_dimension))
else "embedding"
)
self.system_db_name = "system" if config.use_multi_db else config.space
self.system_db_name = config.space

# ---- NEW: pool acquisition strategy
# Get or create a shared pool from the class-level cache
Expand Down Expand Up @@ -436,7 +450,7 @@ def remove_oldest_memory(
WHERE n.memory_type = '{memory_type}'
{optional_condition}
ORDER BY n.updated_at DESC
OFFSET {keep_latest}
OFFSET {int(keep_latest)}
DETACH DELETE n
"""
self.execute_query(query)
Expand Down Expand Up @@ -481,7 +495,7 @@ def node_not_exist(self, scope: str, user_name: str | None = None) -> int:
user_name = user_name if user_name else self.config.user_name
filter_clause = f'n.memory_type = "{scope}" AND n.user_name = "{user_name}"'
query = f"""
MATCH (n@Memory)
MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */)
WHERE {filter_clause}
RETURN n.id AS id
LIMIT 1
Expand Down Expand Up @@ -838,7 +852,7 @@ def get_neighbors_by_tag(
query = f"""
LET tag_list = {tag_list_literal}

MATCH (n@Memory)
MATCH (n@Memory /*+ INDEX(idx_memory_user_name) */)
WHERE {where_clause}
RETURN {return_fields},
size( filter( n.tags, t -> t IN tag_list ) ) AS overlap_count
Expand Down Expand Up @@ -1392,6 +1406,17 @@ def get_structure_optimization_candidates(
logger.error(f"Failed : {e}, traceback: {traceback.format_exc()}")
return candidates

@timed
def drop_database(self) -> None:
    """Always refuse to drop the backing database.

    In Shared Database Multi-Tenant mode the physical space is shared, so
    dropping it is never permitted from a single tenant's handle.

    Raises:
        ValueError: unconditionally, naming the protected database.
    """
    message = (
        f"Refusing to drop protected database: `{self.db_name}` in "
        f"Shared Database Multi-Tenant mode"
    )
    raise ValueError(message)

@timed
def detect_conflicts(self) -> list[tuple[str, str]]:
"""
Expand Down Expand Up @@ -1462,6 +1487,25 @@ def merge_nodes(self, id1: str, id2: str) -> str:
"""
raise NotImplementedError

@classmethod
def _ensure_space_exists(cls, tmp_client, cfg):
    """Lightweight check to ensure target graph (space) exists.

    Reads the target graph name from ``cfg.space`` and, if the graph is not
    already present, creates it before any session binds to it. Failures are
    logged and swallowed (best-effort), never raised to the caller.
    """
    # No space configured -> nothing to create; warn and bail out.
    db_name = getattr(cfg, "space", None)
    if not db_name:
        logger.warning("[NebulaGraphDBSync] No `space` specified in cfg.")
        return

    try:
        # NOTE(review): assumes the client returns an iterable of rows whose
        # first value is the graph name — confirm against the NebulaGraph
        # Python client API.
        res = tmp_client.execute("SHOW GRAPHS;")
        existing = {row.values()[0].as_string() for row in res}
        if db_name not in existing:
            # Create the graph up front so the later session can bind to it.
            tmp_client.execute(f"CREATE GRAPH IF NOT EXISTS `{db_name}` TYPED MemOSBgeM3Type;")
            logger.info(f"✅ Graph `{db_name}` created before session binding.")
        else:
            logger.debug(f"Graph `{db_name}` already exists.")
    except Exception:
        # Best-effort: log the full traceback but let the caller proceed to
        # its own connection attempt (which will surface a hard failure).
        logger.exception("[NebulaGraphDBSync] Failed to ensure space exists")

@timed
def _ensure_database_exists(self):
graph_type_name = "MemOSBgeM3Type"
Expand Down
4 changes: 2 additions & 2 deletions src/memos/memories/textual/tree.py
Original file line number Diff line number Diff line change
Expand Up @@ -326,10 +326,10 @@ def load(self, dir: str) -> None:
except Exception as e:
logger.error(f"An error occurred while loading memories: {e}")

def dump(self, dir: str) -> None:
def dump(self, dir: str, include_embedding: bool = False) -> None:
"""Dump memories to os.path.join(dir, self.config.memory_filename)"""
try:
json_memories = self.graph_store.export_graph()
json_memories = self.graph_store.export_graph(include_embedding=include_embedding)

os.makedirs(dir, exist_ok=True)
memory_file = os.path.join(dir, self.config.memory_filename)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ def __init__(
"LongTermMemory": 1500,
"UserMemory": 480,
}
logger.info(f"MemorySize is {self.memory_size}")
self._threshold = threshold
self.is_reorganize = is_reorganize
self.reorganizer = GraphStructureReorganizer(
Expand Down
56 changes: 0 additions & 56 deletions tests/api/test_start_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,62 +82,6 @@ def mock_mos():
yield mock_instance


def test_configure(mock_mos):
    """Test configuration endpoint."""
    with patch("memos.api.start_api.MOS_INSTANCE", None):
        # Assemble a valid configuration from named sub-sections; the same
        # OpenAI LLM settings are reused for both the chat model and the
        # memory reader.
        openai_llm = {
            "backend": "openai",
            "config": {
                "model_name_or_path": "gpt-3.5-turbo",
                "api_key": "test_key",
                "temperature": 0.7,
                "api_base": "https://api.openai.com/v1",
            },
        }
        mem_reader = {
            "backend": "simple_struct",
            "config": {
                "llm": openai_llm,
                "embedder": {
                    "backend": "sentence_transformer",
                    "config": {"model_name_or_path": "all-MiniLM-L6-v2"},
                },
                "chunker": {
                    "backend": "sentence",
                    "config": {
                        "tokenizer_or_token_counter": "gpt2",
                        "chunk_size": 512,
                        "chunk_overlap": 128,
                        "min_sentences_per_chunk": 1,
                    },
                },
            },
        }
        valid_config = {
            "user_id": "test_user",
            "session_id": "test_session",
            "enable_textual_memory": True,
            "enable_activation_memory": False,
            "top_k": 5,
            "chat_model": openai_llm,
            "mem_reader": mem_reader,
        }

        response = client.post("/configure", json=valid_config)

        assert response.status_code == 200
        assert response.json() == {
            "code": 200,
            "message": "Configuration set successfully",
            "data": None,
        }


def test_configure_error(mock_mos):
"""Test configuration endpoint with error."""
with patch("memos.api.start_api.MOS_INSTANCE", None):
Expand Down