chat_session.py
import random
import re
import uuid
from typing import Callable, List, Optional

from slashgpt.chat_config import ChatConfig
from slashgpt.chat_history import ChatHistory
from slashgpt.dbs.db_base import VectorDBBase
from slashgpt.function.jupyter_runtime import PythonRuntime
from slashgpt.history.storage.abstract import ChatHistoryAbstractStorage
from slashgpt.history.storage.memory import ChatHistoryMemoryStorage
from slashgpt.llms.model import LlmModel
from slashgpt.manifest import Manifest
from slashgpt.utils.print import print_debug, print_error, print_info


class ChatSession:
    """It represents a chat session with a particular AI agent."""

    def __init__(
        self,
        config: ChatConfig,
        default_llm_model: Optional[LlmModel] = None,
        user_id: Optional[str] = None,
        history_engine: Optional[ChatHistoryAbstractStorage] = None,
        manifest: Optional[dict] = None,
        agent_name: str = "GPT",
        intro: bool = True,
        restore: bool = False,
        memory: Optional[dict] = None,
    ):
        """
        Args:
            config (ChatConfig or its subclass): Chat configuration (LLM models and engines)
            default_llm_model (LlmModel, optional): Default LLM model
            user_id (str, optional): User id (for history)
            history_engine (ChatHistoryAbstractStorage, optional): History engine
            manifest (dict, optional): Manifest which defines the agent's behavior
            agent_name (str, optional): Display name of the agent
            intro (bool, optional): True if the introduction message should be appended
            restore (bool, optional): True if we are restoring an existing session
            memory (dict, optional): The initial value of the short-term memory
        """
        self.config: ChatConfig = config
        """Configuration object (ChatConfig), which specifies accessible LLM models"""
        self.agent_name: str = agent_name
        """Display name of the AI agent (str)"""
        self.manifest: Manifest = Manifest(manifest if manifest else {}, config.base_path, agent_name)
        """Manifest which specifies the behavior of the AI agent (Manifest)"""
        self.user_id: str = user_id if user_id else str(uuid.uuid4())
        """Specified user id or a randomly generated uuid (str)"""
        self.history: ChatHistory = ChatHistory(history_engine or ChatHistoryMemoryStorage(self.user_id, agent_name))
        """Chat history (ChatHistory)"""
        self.memory: Optional[dict] = memory
        """Short-term memory (dict, optional)"""

        # Load the model name and make sure that we have the required keys
        if self.manifest.model():
            llm_model = self.config.get_llm_model_from_manifest(self.manifest)
        elif default_llm_model:
            llm_model = default_llm_model
        else:
            llm_model = self.config.get_default_llm_model()
        self.set_llm_model(llm_model)

        # Load the prompt, fill variables and append it as the system message
        if self.config.verbose and memory is not None:
            print_debug(f"memory = {memory}")
        self.prompt: str = self.manifest.prompt_data(config.manifests if hasattr(config, "manifests") else {}, memory)
        """Prompt for the AI agent (str)"""
        if self.prompt and not restore:
            self.append_message("system", self.prompt, True)

        # Prepare the embedded database index
        self.vector_db: VectorDBBase = self.manifest.get_vector_db(config)
        """Associated vector database (DBPinecone, optional, to be virtualized)"""

        # Load the functions file if it is specified
        self.functions: List[dict] = self.manifest.functions()
        """List of function definitions (list, optional)"""
        if self.functions and self.config.verbose:
            print_debug(self.functions)

        self.intro_message: Optional[str] = self.__set_intro(intro)
        """Introduction message (str, optional)"""

    def set_llm_model(self, llm_model: LlmModel):
        """Set the LLM model"""
        if llm_model.check_api_key():
            self.llm_model = llm_model
        else:
            print_error("You need to set " + llm_model.get("api_key") + " to use this model.")
        if self.config.verbose:
            print_debug(f"Model = {self.llm_model.name()}")
"""
Append a message to the chat session, specifying the role ("user", "system" or "function").
In case of a function message, the name specifies the function name.
"""
def append_message(self, role: str, message: str, preset: bool, name=None):
"""Append a message to the chat history
Args:
role (str): Either "user", "system" or "function"
message (str): Message
preset (bool): True, if it is preset by the manifest
name (str, optional): function name (when the role is "function")
"""
self.history.append_message({"role": role, "content": message, "name": name, "preset": preset})
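
    # Example (sketch): recording a function result so the LLM sees it on the
    # next turn. The function name and payload here are hypothetical:
    #
    #   session.append_message("function", '{"temperature": 72}', False, name="get_weather")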

    def append_user_question(self, message: str):
        """Append a question from the user to the history,
        and update the prompt if necessary (e.g., RAG)"""
        message = self.manifest.format_question(message)
        self.append_message("user", message, False)
        if self.vector_db:
            articles = self.vector_db.fetch_related_articles(self.history.messages(), self.llm_model)
            assert self.history.get_message_prop(0, "role") == "system", "Missing system message"
            self.history.set_message(
                0,
                {
                    "role": "system",
                    "content": re.sub("\\{articles\\}", articles, self.prompt, 1),
                },
            )
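
    # The RAG substitution above assumes the manifest's system prompt contains
    # a literal "{articles}" placeholder; the fetched articles replace only its
    # first occurrence (the trailing 1 passed to re.sub is the count).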

    def __set_intro(self, use_intro: bool):
        # Pick one of the manifest's intro messages at random and append it
        intro_message = None
        intro = self.intro()
        if use_intro and intro:
            intro_message = intro[random.randrange(0, len(intro))]
            self.append_message("assistant", intro_message, True)
        return intro_message

    def temperature(self):
        """Temperature specified in the manifest"""
        return self.manifest.temperature()

    def intro(self):
        """Introduction messages specified in the manifest"""
        return self.manifest.get("intro")

    def username(self):
        """User name specified in the manifest"""
        return self.manifest.username()

    def botname(self):
        """Bot name specified in the manifest"""
        return self.manifest.botname()

    def title(self):
        """Title of the AI agent specified in the manifest"""
        return self.manifest.title()

    def call_llm(self):
        """
        Let the LLM generate a response based on the messages in this session.
        The application typically calls the call_loop method instead.

        Returns:
            res (str): assistant message
            function_call (optional): object representing the requested function call
            token_usage (optional): token usage reported by the LLM
        """
        messages = self.history.messages()
        (role, res, function_call, token_usage) = self.llm_model.generate_response(messages, self.manifest, self.config.verbose)
        if self.config.verbose and function_call is not None:
            print_info(function_call)
        if role and res:
            self.append_message(role, res, False)
        return (res, function_call, token_usage)
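
    # Example (sketch): calling the LLM once and handling the result directly;
    # most applications use call_loop below instead.
    #
    #   (res, function_call, token_usage) = session.call_llm()
    #   if res:
    #       print(res)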

    def call_loop(self, callback: Callable[[str, tuple[str, dict]], None], runtime: Optional[PythonRuntime] = None):
        """
        Calls the LLM and processes the response (function calls).
        It may call itself recursively if necessary.
        """
        (res, function_call, _) = self.call_llm()
        if res:
            callback("bot", res)
        if function_call:
            # Check if this function needs to be processed by the application (emit style)
            (action_data, action_method) = function_call.get_emit_data(self.config.verbose)
            if action_method:
                # Yes, let the application process it
                callback("emit", (action_method, action_data))
            else:
                # No, process it by calling its process_function_call method
                (
                    function_message,
                    function_name,
                    should_call_llm,
                ) = function_call.process_function_call(
                    self.history,
                    runtime,
                    self.config.verbose,
                )
                if function_message:
                    callback("function", (function_name, function_message))
                if should_call_llm:
                    self.call_loop(callback, runtime)
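

def _example_callback(kind: str, data) -> None:
    """A minimal sketch of a call_loop callback (illustrative only, not part
    of the library API).

    kind is "bot", "emit", or "function", matching the calls in call_loop above.
    """
    if kind == "bot":
        # data is the assistant's message (str)
        print(f"assistant: {data}")
    elif kind == "function":
        # data is a (function_name, function_message) tuple
        (name, message) = data
        print(f"function {name}: {message}")
    elif kind == "emit":
        # data is an (action_method, action_data) tuple for the application to handle
        (method, payload) = data
        print(f"emit {method}: {payload}")


# A usage sketch (the ChatConfig construction below is hypothetical and depends
# on your environment):
#
#   config = ChatConfig("./manifests")
#   session = ChatSession(config)
#   session.append_user_question("What is the capital of France?")
#   session.call_loop(_example_callback)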