Chat Memory Buffer (#6857)
logan-markewich committed Jul 12, 2023
1 parent 0ce5aa0 commit e82d6af
Showing 14 changed files with 332 additions and 128 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,10 @@
# ChangeLog

## Unreleased

### New Features
- Added basic chat buffer memory to agents / chat engines (#6857)

## [v0.7.5] - 2023-07-11

### New Features
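For orientation, a minimal sketch of the new buffer on its own, assuming only the `ChatMemoryBuffer.from_defaults` and `ChatMessage` APIs visible in the diffs below; seeding via `chat_history` mirrors how the agent constructors wrap a legacy message list:

from llama_index.llms.base import ChatMessage
from llama_index.memory import ChatMemoryBuffer

# Seed the buffer with prior turns; the agents below do the same thing
# internally when handed a plain chat_history list.
memory = ChatMemoryBuffer.from_defaults(
    chat_history=[ChatMessage(role="user", content="Hello!")]
)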
1 change: 1 addition & 0 deletions docs/api_reference/index.rst
@@ -13,6 +13,7 @@ API Reference for the ``llama-index`` package.
node.rst
llm_predictor.rst
llms.rst
memory.rst
node_postprocessor.rst
storage.rst
composability.rst
8 changes: 8 additions & 0 deletions docs/api_reference/memory.rst
@@ -0,0 +1,8 @@
.. _Ref-Memory:

Memory
======

.. automodule:: llama_index.memory
:members:
:inherited-members:
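The page above autogenerates documentation for `llama_index.memory`. A hedged sketch of the basic round trip through the buffer (the `put`, `get`, and `reset` method names come from the `BaseMemory` interface in that module and are not shown in this diff):

from llama_index.llms.base import ChatMessage
from llama_index.memory import ChatMemoryBuffer

memory = ChatMemoryBuffer.from_defaults()
memory.put(ChatMessage(role="user", content="Remember this."))

# get() returns the buffered history (older messages are dropped once
# the buffer's token limit is exceeded); reset() clears it.
history = memory.get()
memory.reset()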
12 changes: 8 additions & 4 deletions llama_index/agent/context_retriever_agent.py
@@ -1,6 +1,6 @@
"""Context retriever agent."""

from typing import List, Optional
from typing import List, Type, Optional

from llama_index.agent.openai_agent import (
DEFAULT_MAX_FUNCTION_CALLS,
@@ -13,6 +13,7 @@
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.llms.base import ChatMessage
from llama_index.llms.openai import OpenAI
from llama_index.memory import BaseMemory, ChatMemoryBuffer
from llama_index.prompts.prompts import QuestionAnswerPrompt
from llama_index.response.schema import RESPONSE_TYPE
from llama_index.schema import NodeWithScore
@@ -59,15 +60,15 @@ def __init__(
qa_prompt: QuestionAnswerPrompt,
context_separator: str,
llm: OpenAI,
chat_history: List[ChatMessage],
memory: BaseMemory,
prefix_messages: List[ChatMessage],
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
) -> None:
super().__init__(
llm=llm,
chat_history=chat_history,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
@@ -87,6 +88,8 @@ def from_tools_and_retriever(
context_separator: str = "\n",
llm: Optional[OpenAI] = None,
chat_history: Optional[List[ChatMessage]] = None,
memory: Optional[BaseMemory] = None,
memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
verbose: bool = False,
max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
callback_manager: Optional[CallbackManager] = None,
@@ -108,6 +111,7 @@ def from_tools_and_retriever(
"""
qa_prompt = qa_prompt or DEFAULT_QA_PROMPT
chat_history = chat_history or []
memory = memory or memory_cls.from_defaults(chat_history=chat_history)
llm = llm or OpenAI(model=DEFAULT_MODEL_NAME)
if not isinstance(llm, OpenAI):
raise ValueError("llm must be an OpenAI instance")
@@ -132,7 +136,7 @@
qa_prompt=qa_prompt,
context_separator=context_separator,
llm=llm,
chat_history=chat_history,
memory=memory,
prefix_messages=prefix_messages,
verbose=verbose,
max_function_calls=max_function_calls,
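The key compatibility line above is `memory = memory or memory_cls.from_defaults(chat_history=chat_history)`: callers that still pass a plain `chat_history` list get it wrapped in the default memory class (`ChatMemoryBuffer`), while new callers supply a memory object directly. A sketch of both call styles; `tools`, `retriever`, and `prior_messages` are hypothetical placeholders, and the class name `ContextRetrieverOpenAIAgent` is assumed from the file name:

from llama_index.memory import ChatMemoryBuffer

# Legacy style: a bare message list, wrapped internally by
# memory_cls.from_defaults(chat_history=...).
agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
    tools=tools,          # placeholder: a list of tools
    retriever=retriever,  # placeholder: a BaseRetriever
    chat_history=prior_messages,
)

# New style: construct and pass the memory explicitly.
agent = ContextRetrieverOpenAIAgent.from_tools_and_retriever(
    tools=tools,
    retriever=retriever,
    memory=ChatMemoryBuffer.from_defaults(),
)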
(Diff for the remaining 10 changed files not shown.)
