1
1
"""
2
- This is a long-living agent that is designed to be a companion to the user.
2
+ GPTMe
3
+ =====
4
+
5
+ This is a long-living AI language model called GPTMe, it is designed to be a helpful companion.
3
6
4
7
It should be able to help the user in various ways, such as:
5
8
6
- - Acting as an executive assistant
7
- - Answering questions
8
- - Helping strategize
9
- - Giving advice
10
9
- Writing code
10
+ - Using the shell
11
+ - Assisting with technical tasks
11
12
- Writing prose (such as email, code docs, etc.)
12
- - Providing companionship
13
+ - Acting as an executive assistant
13
14
14
15
The agent should be able to learn from the user and adapt to their needs.
15
- The agent should try to always output information using markdown formatting, preferably using GitHub Flavored Markdown.
16
+ The agent should always output information using GitHub Flavored Markdown.
17
+ The agent should always output code and commands in markdown code blocks with the appropriate language tag.
16
18
17
19
Since the agent is long-living, it should be able to remember things that the user has told it,
18
20
to do so, it needs to be able to store and query past conversations in a database.
19
21
"""
22
+ # The above docstring is the first message that the agent will see.
20
23
21
24
from typing import Literal , Generator
22
25
from datetime import datetime
23
26
import logging
24
27
import os
25
28
import sys
26
29
import shutil
30
+ import readline # noqa: F401
27
31
import itertools
28
32
from pathlib import Path
29
33
30
- from termcolor import colored
34
+ from termcolor import colored # type: ignore
31
35
import openai
32
36
import click
33
37
34
- import typing
35
38
36
39
from .constants import role_color
37
- from .tools import _execute_linecmd , _execute_codeblock , _execute_save , _execute_shell , _execute_python
40
+ from .tools import (
41
+ _execute_linecmd ,
42
+ _execute_codeblock ,
43
+ _execute_save ,
44
+ _execute_shell ,
45
+ _execute_python ,
46
+ )
38
47
from .util import msgs2dicts
39
48
from .message import Message
40
49
from .logmanager import LogManager
50
+ from .prompts import initial_prompt
41
51
42
52
logger = logging .getLogger (__name__ )
43
53
logging .basicConfig (level = logging .INFO )
44
54
45
55
56
+ LLMChoice = Literal ["openai" , "llama" ]
57
+
58
+ readline .add_history ("What is love?" )
59
+ readline .add_history ("Have you heard about an open-source app called ActivityWatch?" )
60
+ readline .add_history (
61
+ "Explain the 'Attention is All You Need' paper in the style of Andrej Karpathy."
62
+ )
63
+
64
+
46
65
def get_logfile (logdir : str ) -> str :
47
66
logdir = logdir + "/"
48
67
if not os .path .exists (logdir ):
@@ -106,28 +125,52 @@ def handle_cmd(cmd: str, logmanager: LogManager) -> Generator[Message, None, Non
106
125
sys .exit (0 )
107
126
case _:
108
127
print ("Available commands:" )
109
- for cmd in typing .get_args (Actions ):
110
- desc = action_descriptions .get (cmd , default = "missing description" )
128
+ for cmd , desc in action_descriptions .items ():
111
129
print (f" { cmd } : { desc } " )
112
130
113
131
114
132
@click .group ()
115
133
def cli ():
116
134
pass
117
135
136
+
118
137
script_path = Path (os .path .realpath (__file__ ))
119
138
139
+
120
140
@cli .command ()
121
- @click .argument ("command" , default = None , required = False )
141
+ @click .argument ("command" , default = None , required = False )
142
+ @click .option (
143
+ "--logs" ,
144
+ default = script_path .parent .parent / "logs" ,
145
+ help = "Folder where conversation logs are stored" ,
146
+ )
147
+ @click .option ("--llm" , default = "openai" , help = "LLM to use" )
122
148
@click .option (
123
- "--logs" , default = script_path .parent .parent / "logs" , help = "Folder where conversation logs are stored"
149
+ "--stream" ,
150
+ is_flag = True ,
151
+ default = True ,
152
+ help = "Whether to use streaming (only supported for openai atm)" ,
124
153
)
125
- def main (command : str | None , logs : str ):
126
- """Main interactivity loop."""
154
+ @click .option (
155
+ "--prompt" ,
156
+ default = "short" ,
157
+ help = "Can be 'short', 'full', or a custom prompt" ,
158
+ )
159
+ def main (command : str | None , logs : str , llm : LLMChoice , stream : bool , prompt : str ):
160
+ """
161
+ GPTMe, a CLI interface for LLMs.
162
+ """
127
163
openai .api_key = os .environ ["OPENAI_API_KEY" ]
164
+ openai .api_base = "http://localhost:8000/v1"
128
165
166
+ if prompt in ["full" , "short" ]:
167
+ promptmsgs = initial_prompt (short = prompt == "short" )
168
+ else :
169
+ promptmsgs = [Message ("system" , prompt )]
170
+
171
+ print (f"Using logdir { logs } " )
129
172
logfile = get_logfile (logs )
130
- logmanager = LogManager .load (logfile )
173
+ logmanager = LogManager .load (logfile , initial_msgs = promptmsgs )
131
174
logmanager .print ()
132
175
print ("--- ^^^ past messages ^^^ ---" )
133
176
@@ -143,53 +186,117 @@ def main(command: str | None, logs: str):
143
186
while True :
144
187
# if non-interactive command given on cli, exit
145
188
if command_triggered :
189
+ print ("Command triggered, exiting" )
146
190
break
147
191
148
192
# If last message was a response, ask for input.
149
- # If last message was from the user (such as from crash/edited log),
193
+ # If last message was from the user (such as from crash/edited log),
150
194
# then skip asking for input and generate response
151
- if log [- 1 ].role in ["system" , "assistant" ]:
152
- prompt = colored ("User" , role_color ["user" ]) + ": "
195
+ last_msg = log [- 1 ] if log else None
196
+ if not last_msg or (
197
+ (last_msg .role in ["system" , "assistant" ])
198
+ or (log [- 1 ].role == "user" and log [- 1 ].content .startswith ("." ))
199
+ ):
200
+ inquiry = prompt_user (command )
153
201
if command :
154
- print (prompt + command )
155
- inquiry = command
156
202
command = None
157
203
command_triggered = True
158
- else :
159
- inquiry = input (prompt )
160
-
204
+
161
205
if not inquiry :
206
+ print ("Continue 1 (rare!)" )
162
207
continue
163
208
logmanager .append (Message ("user" , inquiry ))
164
209
165
210
assert log [- 1 ].role == "user"
166
211
inquiry = log [- 1 ].content
167
212
# if message starts with ., treat as command
168
- # when command has been run,
213
+ # when command has been run,
169
214
if inquiry .startswith ("." ):
170
215
for msg in handle_cmd (inquiry , logmanager ):
171
216
logmanager .append (msg )
217
+ if command :
218
+ command_triggered = True
219
+ print ("Continue 2" )
172
220
continue
173
221
174
222
# if large context, try to reduce/summarize
175
223
# print response
176
- msg_response = reply (logmanager .prepare_messages ())
224
+ try :
225
+ msg_response = reply (logmanager .prepare_messages (), stream )
226
+
227
+ # log response and run tools
228
+ if msg_response :
229
+ for msg in itertools .chain ([msg_response ], execute_msg (msg_response )):
230
+ logmanager .append (msg )
231
+ except KeyboardInterrupt :
232
+ print ("Interrupted" )
233
+
234
+
235
+ def prompt_user (value = None ) -> str :
236
+ return prompt_input (colored ("User" , role_color ["user" ]) + ": " , value )
237
+
238
+
239
+ def prompt_input (prompt : str , value = None ) -> str :
240
+ if value :
241
+ print (prompt + value )
242
+ else :
243
+ value = input (prompt )
244
+ return value
245
+
177
246
178
- # log response and run tools
179
- for msg in itertools .chain ([msg_response ], execute_msg (msg_response )):
180
- logmanager .append (msg )
247
+ def reply (messages : list [Message ], stream : bool = False ) -> Message :
248
+ if stream :
249
+ return reply_stream (messages )
250
+ else :
251
+ prefix = colored ("Assistant" , "green" , attrs = ["bold" ])
252
+ print (f"{ prefix } : Thinking..." , end = "\r " )
253
+ response = _chat_complete (messages )
254
+ print (" " * shutil .get_terminal_size ().columns , end = "\r " )
255
+ return Message ("assistant" , response )
181
256
182
257
183
- def reply (messages : list [Message ]) -> Message :
184
- # print in-progress indicator
185
- print (colored ("Assistant" , "green" , attrs = ["bold" ]) + ": Thinking..." , end = "\r " )
186
- response = openai .ChatCompletion .create (
258
+ def _chat_complete (messages : list [Message ]) -> str :
259
+ response = openai .ChatCompletion .create ( # type: ignore
187
260
model = "gpt-3.5-turbo" ,
188
261
messages = msgs2dicts (messages ),
189
262
temperature = 0 ,
190
263
)
191
- print (" " * shutil .get_terminal_size ().columns , end = "\r " )
192
- return Message ("assistant" , response .choices [0 ].message .content )
264
+ return response .choices [0 ].message .content
265
+
266
+
267
+ def reply_stream (messages : list [Message ]) -> Message :
268
+ prefix = colored ("Assistant" , "green" , attrs = ["bold" ])
269
+ print (f"{ prefix } : Thinking..." , end = "\r " )
270
+ response = openai .ChatCompletion .create ( # type: ignore
271
+ model = "gpt-3.5-turbo" ,
272
+ messages = msgs2dicts (messages ),
273
+ temperature = 0 ,
274
+ stream = True ,
275
+ max_tokens = 1000 ,
276
+ )
277
+
278
+ def deltas_to_str (deltas : list [dict ]):
279
+ return "" .join ([d .get ("content" , "" ) for d in deltas ])
280
+
281
+ def print_clear ():
282
+ print (" " * shutil .get_terminal_size ().columns , end = "\r " )
283
+
284
+ deltas : list [dict ] = []
285
+ print_clear ()
286
+ print (f"{ prefix } : " , end = "" )
287
+ stop_reason = None
288
+ for chunk in response :
289
+ delta = chunk ["choices" ][0 ]["delta" ]
290
+ deltas .append (delta )
291
+ stop_reason = chunk ["choices" ][0 ]["finish_reason" ]
292
+ print (deltas_to_str ([delta ]), end = "" )
293
+ # need to flush stdout to get the print to show up
294
+ sys .stdout .flush ()
295
+ print_clear ()
296
+ verbose = True
297
+ if verbose :
298
+ print (f" - Stop reason: { stop_reason } " )
299
+ return Message ("assistant" , deltas_to_str (deltas ))
193
300
194
301
195
302
if __name__ == "__main__" :
0 commit comments