Skip to content

Commit

Permalink
feat(memories): Refactor memory system and add a new agent memory `Lo…
Browse files Browse the repository at this point in the history
…ngtermAgentMemory` (#435)

Co-authored-by: Wendong-Fan <133094783+Wendong-Fan@users.noreply.github.com>
Co-authored-by: Wendong <w3ndong.fan@gmail.com>
Co-authored-by: Zecheng Zhang <zecheng@alumni.stanford.edu>
  • Loading branch information
4 people committed Apr 1, 2024
1 parent 0a44d21 commit d7e4924
Show file tree
Hide file tree
Showing 18 changed files with 875 additions and 266 deletions.
8 changes: 4 additions & 4 deletions camel/agents/chat_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from camel.agents import BaseAgent
from camel.configs import ChatGPTConfig
from camel.memories import (
BaseMemory,
AgentMemory,
ChatHistoryMemory,
MemoryRecord,
ScoreBasedContextCreator,
Expand Down Expand Up @@ -81,7 +81,7 @@ class ChatAgent(BaseAgent):
responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
model_config (BaseConfig, optional): Configuration options for the
LLM model. (default: :obj:`None`)
memory (BaseMemory, optional): The agent memory for managing chat
memory (AgentMemory, optional): The agent memory for managing chat
messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
(default: :obj:`None`)
message_window_size (int, optional): The maximum number of previous
Expand All @@ -105,7 +105,7 @@ def __init__(
system_message: BaseMessage,
model_type: Optional[ModelType] = None,
model_config: Optional[BaseConfig] = None,
memory: Optional[BaseMemory] = None,
memory: Optional[AgentMemory] = None,
message_window_size: Optional[int] = None,
token_limit: Optional[int] = None,
output_language: Optional[str] = None,
Expand Down Expand Up @@ -137,7 +137,7 @@ def __init__(
self.model_backend.token_counter,
self.model_token_limit,
)
self.memory: BaseMemory = memory or ChatHistoryMemory(
self.memory: AgentMemory = memory or ChatHistoryMemory(
context_creator, window_size=message_window_size)

self.terminated: bool = False
Expand Down
4 changes: 2 additions & 2 deletions camel/agents/critic_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from colorama import Fore

from camel.agents import ChatAgent
from camel.memories import BaseMemory
from camel.memories import AgentMemory
from camel.messages import BaseMessage
from camel.responses import ChatAgentResponse
from camel.types import ModelType
Expand Down Expand Up @@ -50,7 +50,7 @@ def __init__(
system_message: BaseMessage,
model_type: ModelType = ModelType.GPT_3_5_TURBO,
model_config: Optional[Any] = None,
memory: Optional[BaseMemory] = None,
memory: Optional[AgentMemory] = None,
message_window_size: int = 6,
retry_attempts: int = 2,
verbose: bool = False,
Expand Down
22 changes: 16 additions & 6 deletions camel/memories/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,16 +13,26 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

from .records import MemoryRecord, ContextRecord
from .base import BaseMemory
from .context_creators.base import BaseContextCreator
from .base import MemoryBlock, AgentMemory, BaseContextCreator
from .context_creators.score_based import ScoreBasedContextCreator
from .chat_history_memory import ChatHistoryMemory
from .blocks.chat_history_block import ChatHistoryBlock
from .blocks.vectordb_block import VectorDBBlock
from .agent_memories import (
VectorDBMemory,
ChatHistoryMemory,
LongtermAgentMemory,
)

__all__ = [
'MemoryRecord',
'ContextRecord',
'BaseMemory',
'MemoryBlock',
"AgentMemory",
'BaseContextCreator',
'ScoreBasedContextCreator',
'ChatHistoryMemory',
"BaseContextCreator",
"ScoreBasedContextCreator",
'VectorDBMemory',
'ChatHistoryBlock',
'VectorDBBlock',
'LongtermAgentMemory',
]
156 changes: 156 additions & 0 deletions camel/memories/agent_memories.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

from typing import List, Optional

from camel.memories import (
AgentMemory,
BaseContextCreator,
ContextRecord,
MemoryRecord,
)
from camel.memories.blocks import ChatHistoryBlock, VectorDBBlock
from camel.storages import BaseKeyValueStorage, BaseVectorStorage
from camel.types import OpenAIBackendRole


class ChatHistoryMemory(AgentMemory):
    r"""An agent memory wrapper of :obj:`ChatHistoryBlock`.

    Args:
        context_creator (BaseContextCreator): A model context creator.
        storage (BaseKeyValueStorage, optional): A storage backend for storing
            chat history. If `None`, an :obj:`InMemoryKeyValueStorage`
            will be used. (default: :obj:`None`)
        window_size (int, optional): The number of recent chat messages to
            retrieve. If not provided, the entire chat history will be
            retrieved. (default: :obj:`None`)

    Raises:
        TypeError: If `window_size` is neither an integer nor `None`.
        ValueError: If `window_size` is a negative integer.
    """

    def __init__(
        self,
        context_creator: BaseContextCreator,
        storage: Optional[BaseKeyValueStorage] = None,
        window_size: Optional[int] = None,
    ) -> None:
        # Validate the optional window before storing any state.
        if window_size is not None:
            if not isinstance(window_size, int):
                raise TypeError("`window_size` must be an integer or None.")
            if window_size < 0:
                raise ValueError("`window_size` must be non-negative.")
        self._context_creator = context_creator
        self._window_size = window_size
        self._history_block = ChatHistoryBlock(storage=storage)

    def retrieve(self) -> List[ContextRecord]:
        r"""Returns recent chat records, bounded by the configured window."""
        return self._history_block.retrieve(self._window_size)

    def write_records(self, records: List[MemoryRecord]) -> None:
        r"""Appends the given records to the underlying chat history block."""
        self._history_block.write_records(records)

    def get_context_creator(self) -> BaseContextCreator:
        r"""Returns the context creator supplied at construction time."""
        return self._context_creator

    def clear(self) -> None:
        r"""Removes all chat messages from the memory."""
        self._history_block.clear()


class VectorDBMemory(AgentMemory):
    r"""An agent memory wrapper of :obj:`VectorDBBlock`. This memory queries
    messages stored in the vector database. Notice that the most recent
    messages will not be added to the context.

    Args:
        context_creator (BaseContextCreator): A model context creator.
        storage (BaseVectorStorage, optional): A vector storage storage. If
            `None`, an :obj:`QdrantStorage` will be used.
            (default: :obj:`None`)
        retrieve_limit (int, optional): The maximum number of messages
            to be added into the context. (default: :obj:`3`)
    """

    def __init__(
        self,
        context_creator: BaseContextCreator,
        storage: Optional[BaseVectorStorage] = None,
        retrieve_limit: int = 3,
    ) -> None:
        self._context_creator = context_creator
        self._retrieve_limit = retrieve_limit
        self._vectordb_block = VectorDBBlock(storage=storage)

        # Latest user input; used as the semantic query in `retrieve`.
        self._current_topic: str = ""

    def retrieve(self) -> List[ContextRecord]:
        r"""Queries the vector database with the current topic.

        Returns:
            List[ContextRecord]: At most ``retrieve_limit`` records similar
                to the most recent user input.
        """
        return self._vectordb_block.retrieve(
            self._current_topic,
            limit=self._retrieve_limit,
        )

    def write_records(self, records: List[MemoryRecord]) -> None:
        r"""Writes records to the vector database and updates the topic.

        Args:
            records (List[MemoryRecord]): Records to be embedded and stored.
        """
        # Assume the last user input is the current topic.
        for record in records:
            if record.role_at_backend == OpenAIBackendRole.USER:
                self._current_topic = record.message.content
        self._vectordb_block.write_records(records)

    def get_context_creator(self) -> BaseContextCreator:
        r"""Returns the context creator supplied at construction time."""
        return self._context_creator

    def clear(self) -> None:
        r"""Removes all records from the memory.

        Fix: `MemoryBlock.clear` is abstract, so without this override the
        class could not be instantiated at all (TypeError on construction).
        Also resets the current topic so stale queries are not reused.
        """
        self._current_topic = ""
        # VectorDBBlock must implement `clear` since it is abstract on
        # MemoryBlock — NOTE(review): confirm against blocks/vectordb_block.
        self._vectordb_block.clear()


class LongtermAgentMemory(AgentMemory):
    r"""An implementation of the :obj:`AgentMemory` abstract base class for
    augmenting a chat history block with a vector database block.

    Args:
        context_creator (BaseContextCreator): A model context creator.
        chat_history_block (ChatHistoryBlock, optional): A chat history
            block. If `None`, a default :obj:`ChatHistoryBlock` is created.
            (default: :obj:`None`)
        vector_db_block (VectorDBBlock, optional): A vector database block.
            If `None`, a default :obj:`VectorDBBlock` is created.
            (default: :obj:`None`)
        retrieve_limit (int, optional): The maximum number of records
            retrieved from the vector database. (default: :obj:`3`)
    """

    def __init__(
        self,
        context_creator: BaseContextCreator,
        chat_history_block: Optional[ChatHistoryBlock] = None,
        vector_db_block: Optional[VectorDBBlock] = None,
        retrieve_limit: int = 3,
    ) -> None:
        self.chat_history_block = chat_history_block or ChatHistoryBlock()
        self.vector_db_block = vector_db_block or VectorDBBlock()
        self.retrieve_limit = retrieve_limit
        self._context_creator = context_creator
        # Latest user input; used as the vector database query.
        self._current_topic: str = ""

    def get_context_creator(self) -> BaseContextCreator:
        r"""Returns the context creator supplied at construction time."""
        return self._context_creator

    def retrieve(self) -> List[ContextRecord]:
        r"""Combines chat history with related vector database records.

        Returns:
            List[ContextRecord]: The chat history with up to
                ``retrieve_limit`` related records spliced in immediately
                after the first history record.
        """
        history = self.chat_history_block.retrieve()
        related = self.vector_db_block.retrieve(
            self._current_topic, self.retrieve_limit)
        # Keep the leading history record in place, then the retrieved
        # context, then the remainder of the history.
        return history[:1] + related + history[1:]

    def write_records(self, records: List[MemoryRecord]) -> None:
        r"""Converts the provided chat messages into vector representations
        and writes them to the vector database.

        Args:
            records (List[MemoryRecord]): Messages to be added to the vector
                database.
        """
        self.vector_db_block.write_records(records)
        self.chat_history_block.write_records(records)

        # The most recent user input becomes the retrieval topic.
        user_inputs = [
            record.message.content
            for record in records
            if record.role_at_backend == OpenAIBackendRole.USER
        ]
        if user_inputs:
            self._current_topic = user_inputs[-1]

    def clear(self) -> None:
        r"""Removes all records from the memory."""
        # NOTE(review): `_current_topic` is deliberately left untouched,
        # matching the original behavior; retrieval on an empty database
        # returns nothing regardless.
        self.chat_history_block.clear()
        self.vector_db_block.clear()
129 changes: 97 additions & 32 deletions camel/memories/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,42 +15,19 @@
from abc import ABC, abstractmethod
from typing import List, Tuple

from camel.memories import MemoryRecord
from camel.memories import ContextRecord, MemoryRecord
from camel.messages import OpenAIMessage
from camel.utils import BaseTokenCounter


class BaseMemory(ABC):
r"""An abstract base class that defines the foundational operations for a
memory component within an agent's memory system.
The memory component is tasked with functions like saving chat histories,
fetching or storing information in vector databases, and other related
operations. Every memory system should incorporate at least one instance of
a subclass derived from :obj:`BaseMemory`.
These instances, known as "memories", typically communicate using the
:obj:`MemoryRecord` object. Usually, a memory has at least one "storage"
mechanism, allowing it to interface with various storage systems, such as
disks or vector databases. Additionally, some memories might embed other
memory instances, enabling them to function as a high-level controller
within the broader memory system.
By default, when executing the :obj:`step()` method, an agent retrieves
messages from its designated memory and combines them with an incoming
message for input to the agent. Subsequently, both the response message and
the incoming messages are archived back into the memory.
class MemoryBlock(ABC):
r"""An abstract class serves as the fundamental component within the agent
memory system. This class is equipped with "write" and "clear" functions.
However, it intentionally does not define a retrieval interface, as the
structure of the data to be retrieved may vary in different types of
memory blocks.
"""

@abstractmethod
def get_context(self) -> Tuple[List[OpenAIMessage], int]:
r"""Gets chat context with a proper size for the agent.
Returns:
(List[OpenAIMessage], int): A tuple containing the constructed
context in OpenAIMessage format and the total token count.
"""
pass

@abstractmethod
def write_records(self, records: List[MemoryRecord]) -> None:
r"""Writes records to the memory, appending them to existing ones.
Expand All @@ -70,6 +47,94 @@ def write_record(self, record: MemoryRecord) -> None:

@abstractmethod
def clear(self) -> None:
r"""Clears all messages from the memory.
r"""Clears all messages from the memory."""
pass


class BaseContextCreator(ABC):
    r"""An abstract base class defining the interface for context creation
    strategies.

    This class provides a foundational structure for different strategies to
    generate conversational context from a list of context records. The
    primary goal is to create a context that is aligned with a specified
    token count limit, allowing subclasses to define their specific approach.

    Subclasses should implement the :obj:`token_counter`,
    :obj:`token_limit`, and :obj:`create_context` methods to provide
    specific context creation logic.

    Attributes:
        token_counter (BaseTokenCounter): A token counter instance
            responsible for counting tokens in a message.
        token_limit (int): The maximum number of tokens allowed in the
            generated context.
    """

    @property
    @abstractmethod
    def token_counter(self) -> BaseTokenCounter:
        # Subclasses return the counter used to measure message sizes.
        pass

    @property
    @abstractmethod
    def token_limit(self) -> int:
        # Subclasses return the hard cap on tokens in the produced context.
        pass

    @abstractmethod
    def create_context(
        self,
        records: List[ContextRecord],
    ) -> Tuple[List[OpenAIMessage], int]:
        r"""An abstract method to create conversational context from the
        chat history.

        Constructs the context from provided records. The specifics of how
        this is done and how the token count is managed should be provided
        by subclasses implementing this method. The output messages order
        should keep same as the input order.

        Args:
            records (List[ContextRecord]): A list of context records from
                which to generate the context.

        Returns:
            Tuple[List[OpenAIMessage], int]: A tuple containing the
                constructed context in OpenAIMessage format and the total
                token count.
        """
        pass


class AgentMemory(MemoryBlock, ABC):
    r"""A specialized form of :obj:`MemoryBlock` designed for direct
    integration with an agent. Subclasses provide the two abstract hooks
    :obj:`retrieve` and :obj:`get_context_creator`, from which
    :obj:`get_context` assembles the model context.
    """

    @abstractmethod
    def retrieve(self) -> List[ContextRecord]:
        r"""Get a record list from the memory for creating model context.

        Returns:
            List[ContextRecord]: A record list for creating model context.
        """
        ...

    @abstractmethod
    def get_context_creator(self) -> BaseContextCreator:
        r"""Gets context creator.

        Returns:
            BaseContextCreator: A model context creator.
        """
        ...

    def get_context(self) -> Tuple[List[OpenAIMessage], int]:
        r"""Gets chat context with a proper size for the agent from the
        memory.

        Returns:
            (List[OpenAIMessage], int): A tuple containing the constructed
                context in OpenAIMessage format and the total token count.
        """
        creator = self.get_context_creator()
        records = self.retrieve()
        return creator.create_context(records)
Loading

0 comments on commit d7e4924

Please sign in to comment.