Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
208 changes: 161 additions & 47 deletions docker/.env.example
Original file line number Diff line number Diff line change
@@ -1,60 +1,174 @@
# MemOS Environment Variables Configuration
TZ=Asia/Shanghai
# MemOS Environment Variables (core runtime)
# Legend: [required] needed for default startup; others are optional or conditional per comments.

MOS_CUBE_PATH="/tmp/data_test" # Path to memory storage (e.g. /tmp/data_test)
MOS_ENABLE_DEFAULT_CUBE_CONFIG="true" # Enable default cube config (true/false)
## Base
TZ=Asia/Shanghai
ENV_NAME=PLAYGROUND_OFFLINE # Tag shown in DingTalk notifications (e.g., PROD_ONLINE/TEST); no runtime effect unless ENABLE_DINGDING_BOT=true
MOS_CUBE_PATH=/tmp/data_test # local data path
MEMOS_BASE_PATH=. # CLI/SDK cache path
MOS_ENABLE_DEFAULT_CUBE_CONFIG=true # enable default cube config
MOS_ENABLE_REORGANIZE=false # enable memory reorg
MOS_TEXT_MEM_TYPE=general_text # general_text | tree_text
ASYNC_MODE=sync # async/sync, used in default cube config

# OpenAI Configuration
OPENAI_API_KEY="sk-xxx" # Your OpenAI API key
OPENAI_API_BASE="http://xxx" # OpenAI API base URL (default: https://api.openai.com/v1)
## User/session defaults
MOS_USER_ID=root
MOS_SESSION_ID=default_session
MOS_MAX_TURNS_WINDOW=20
MOS_TOP_K=50

# MemOS Chat Model Configuration
## Chat LLM (main dialogue)
MOS_CHAT_MODEL=gpt-4o-mini
MOS_CHAT_TEMPERATURE=0.8
MOS_MAX_TOKENS=8000
MOS_TOP_P=0.9
MOS_TOP_K=50
MOS_CHAT_MODEL_PROVIDER=openai

# graph db
# neo4j
NEO4J_BACKEND=xxx
NEO4J_URI=bolt://xxx
NEO4J_USER=xxx
NEO4J_PASSWORD=xxx
MOS_NEO4J_SHARED_DB=xxx
NEO4J_DB_NAME=xxx

# textmem reorg
MOS_ENABLE_REORGANIZE=false

# MemOS User Configuration
MOS_USER_ID=root
MOS_SESSION_ID=default_session
MOS_MAX_TURNS_WINDOW=20
MOS_CHAT_MODEL_PROVIDER=openai # openai | huggingface | vllm
MOS_MODEL_SCHEMA=memos.configs.llm.VLLMLLMConfig # vllm only: config class path; keep default unless you extend it
OPENAI_API_KEY=sk-xxx # [required] when provider=openai
OPENAI_API_BASE=https://api.openai.com/v1 # [required] base for the key
OPENAI_BASE_URL= # compatibility for eval/scheduler
VLLM_API_KEY= # required when provider=vllm
VLLM_API_BASE=http://localhost:8088/v1 # required when provider=vllm

# MemReader Configuration
## MemReader / retrieval LLM
MEMRADER_MODEL=gpt-4o-mini
MEMRADER_API_KEY=sk-xxx
MEMRADER_API_BASE=http://xxx:3000/v1
MEMRADER_API_KEY=sk-xxx # [required] can reuse OPENAI_API_KEY
MEMRADER_API_BASE=http://localhost:3000/v1 # [required] base for the key
MEMRADER_MAX_TOKENS=5000

# Embedding & rerank
## Embedding & rerank
EMBEDDING_DIMENSION=1024
MOS_EMBEDDER_BACKEND=universal_api
MOS_EMBEDDER_MODEL=bge-m3
MOS_EMBEDDER_API_BASE=http://xxx
MOS_EMBEDDER_API_KEY=EMPTY
MOS_RERANKER_BACKEND=http_bge
MOS_RERANKER_URL=http://xxx
# Ollama Configuration (for embeddings)
#OLLAMA_API_BASE=http://xxx

# milvus for pref mem
MILVUS_URI=http://xxx
MILVUS_USER_NAME=xxx
MILVUS_PASSWORD=xxx

# pref mem
MOS_EMBEDDER_BACKEND=universal_api # universal_api | ollama
MOS_EMBEDDER_PROVIDER=openai # required when universal_api
MOS_EMBEDDER_MODEL=bge-m3 # siliconflow → use BAAI/bge-m3
MOS_EMBEDDER_API_BASE=http://localhost:8000/v1 # required when universal_api
MOS_EMBEDDER_API_KEY=EMPTY # required when universal_api
OLLAMA_API_BASE=http://localhost:11434 # required when backend=ollama
MOS_RERANKER_BACKEND=http_bge # http_bge | http_bge_strategy | cosine_local
MOS_RERANKER_URL=http://localhost:8001 # required when backend=http_bge*
MOS_RERANKER_MODEL=bge-reranker-v2-m3 # siliconflow → use BAAI/bge-reranker-v2-m3
MOS_RERANKER_HEADERS_EXTRA= # extra headers, JSON string
MOS_RERANKER_STRATEGY=single_turn
MOS_RERANK_SOURCE= # optional rerank scope, e.g., history/stream/custom

## Internet search & preference memory
ENABLE_INTERNET=false
BOCHA_API_KEY= # required if ENABLE_INTERNET=true
SEARCH_MODE=fast # fast | fine | mixture
FAST_GRAPH=false
BM25_CALL=false
VEC_COT_CALL=false
FINE_STRATEGY=rewrite # rewrite | recreate | deep_search
ENABLE_ACTIVATION_MEMORY=false
ENABLE_PREFERENCE_MEMORY=true
RETURN_ORIGINAL_PREF_MEM=true
PREFERENCE_ADDER_MODE=fast # fast | safe
DEDUP_PREF_EXP_BY_TEXTUAL=false

## Reader chunking
MEM_READER_BACKEND=simple_struct # simple_struct | strategy_struct
MEM_READER_CHAT_CHUNK_TYPE=default # default | content_length
MEM_READER_CHAT_CHUNK_TOKEN_SIZE=1600 # tokens per chunk (default mode)
MEM_READER_CHAT_CHUNK_SESS_SIZE=10 # sessions per chunk (default mode)
MEM_READER_CHAT_CHUNK_OVERLAP=2 # overlap between chunks

## Scheduler (MemScheduler / API)
MOS_ENABLE_SCHEDULER=false
MOS_SCHEDULER_TOP_K=10
MOS_SCHEDULER_ACT_MEM_UPDATE_INTERVAL=300
MOS_SCHEDULER_CONTEXT_WINDOW_SIZE=5
MOS_SCHEDULER_THREAD_POOL_MAX_WORKERS=10000
MOS_SCHEDULER_CONSUME_INTERVAL_SECONDS=0.01
MOS_SCHEDULER_ENABLE_PARALLEL_DISPATCH=true
MOS_SCHEDULER_ENABLE_ACTIVATION_MEMORY=false
API_SCHEDULER_ON=true
API_SEARCH_WINDOW_SIZE=5
API_SEARCH_HISTORY_TURNS=5

## Graph / vector stores
NEO4J_BACKEND=neo4j-community # neo4j-community | neo4j | nebular | polardb
NEO4J_URI=bolt://localhost:7687 # required when backend=neo4j*
NEO4J_USER=neo4j # required when backend=neo4j*
NEO4J_PASSWORD=12345678 # required when backend=neo4j*
NEO4J_DB_NAME=neo4j # required for shared-db mode
MOS_NEO4J_SHARED_DB=false
QDRANT_HOST=localhost
QDRANT_PORT=6333
MILVUS_URI=http://localhost:19530 # required when ENABLE_PREFERENCE_MEMORY=true
MILVUS_USER_NAME=root # same as above
MILVUS_PASSWORD=12345678 # same as above
NEBULAR_HOSTS=["localhost"]
NEBULAR_USER=root
NEBULAR_PASSWORD=xxxxxx
NEBULAR_SPACE=shared-tree-textual-memory
NEBULAR_WORKING_MEMORY=20
NEBULAR_LONGTERM_MEMORY=1000000
NEBULAR_USER_MEMORY=1000000

## Relational DB (user manager / PolarDB)
MOS_USER_MANAGER_BACKEND=sqlite # sqlite | mysql
MYSQL_HOST=localhost # required when backend=mysql
MYSQL_PORT=3306
MYSQL_USERNAME=root
MYSQL_PASSWORD=12345678
MYSQL_DATABASE=memos_users
MYSQL_CHARSET=utf8mb4
POLAR_DB_HOST=localhost
POLAR_DB_PORT=5432
POLAR_DB_USER=root
POLAR_DB_PASSWORD=123456
POLAR_DB_DB_NAME=shared_memos_db
POLAR_DB_USE_MULTI_DB=false

## Redis (scheduler queue) — fill only if you want scheduler queues in Redis; otherwise in-memory queue is used
REDIS_HOST=localhost # global Redis endpoint (preferred over MEMSCHEDULER_*)
REDIS_PORT=6379
REDIS_DB=0
REDIS_PASSWORD=
REDIS_SOCKET_TIMEOUT=
REDIS_SOCKET_CONNECT_TIMEOUT=
MEMSCHEDULER_REDIS_HOST= # fallback keys if not using the global ones
MEMSCHEDULER_REDIS_PORT=
MEMSCHEDULER_REDIS_DB=
MEMSCHEDULER_REDIS_PASSWORD=
MEMSCHEDULER_REDIS_TIMEOUT=
MEMSCHEDULER_REDIS_CONNECT_TIMEOUT=

## MemScheduler LLM
MEMSCHEDULER_OPENAI_API_KEY= # LLM key for scheduler’s own calls (OpenAI-compatible); leave empty if scheduler not using LLM
MEMSCHEDULER_OPENAI_BASE_URL= # Base URL for the above; can reuse OPENAI_API_BASE
MEMSCHEDULER_OPENAI_DEFAULT_MODEL=gpt-4o-mini

## Nacos (optional config center)
NACOS_ENABLE_WATCH=false
NACOS_WATCH_INTERVAL=60
NACOS_SERVER_ADDR=
NACOS_DATA_ID=
NACOS_GROUP=DEFAULT_GROUP
NACOS_NAMESPACE=
AK=
SK=

## DingTalk bot & OSS upload
ENABLE_DINGDING_BOT=false # set true -> fields below required
DINGDING_ACCESS_TOKEN_USER=
DINGDING_SECRET_USER=
DINGDING_ACCESS_TOKEN_ERROR=
DINGDING_SECRET_ERROR=
DINGDING_ROBOT_CODE=
DINGDING_APP_KEY=
DINGDING_APP_SECRET=
OSS_ENDPOINT= # bot image upload depends on OSS
OSS_REGION=
OSS_BUCKET_NAME=
OSS_ACCESS_KEY_ID=
OSS_ACCESS_KEY_SECRET=
OSS_PUBLIC_BASE_URL=

## Logging / external sink
CUSTOM_LOGGER_URL=
CUSTOM_LOGGER_TOKEN=
CUSTOM_LOGGER_WORKERS=2

## SDK / external client
MEMOS_API_KEY=
MEMOS_BASE_URL=https://memos.memtensor.cn/api/openmem/v1