Support save/load conversation in both JSON and Markdown format
evilpan committed Mar 22, 2023
1 parent cc6985f commit 0777a45
Showing 2 changed files with 45 additions and 22 deletions.
11 changes: 6 additions & 5 deletions README.md
@@ -7,7 +7,7 @@ Take chatGPT into command line.
1. clone this repo
2. pip3 install -U -r requirements.txt
3. copy `demo_config.json` to `config.json`
3. get your [OPENAI_API_KEY][key] and put it in `config.json`
4. get your [OPENAI_API_KEY][key] and put it in `config.json`

# Run

@@ -50,11 +50,12 @@ gptcli commands (use '.help -v' for verbose/'.help <topic>' for details):
======================================================================================================
.edit Run a text editor and optionally open a file with it
.help List available commands or provide detailed help for a specific command
.load Load conversation from file
.multiline input multiple lines, end with ctrl-d(Linux/macOS) or ctrl-z(Windows). Cancel with ctrl-c
.load Load conversation from Markdown/JSON file
.multiline input multiple lines, end with ctrl-d(Linux/macOS) or ctrl-z(Windows). Cancel
with ctrl-c
.quit Exit this application
.reset Reset session, i.e. clear chat history
.save Save current conversation to file
.save Save current conversation to Markdown/JSON file
.set Set a settable parameter or show current settings of parameters
```
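
Based on the argument parsers added to gptcli.py further down in this diff, usage inside the gptcli prompt would look roughly like the following (file names are illustrative):

```
.save chat.md               # save as Markdown (default mode)
.save -m json chat.json     # save as JSON
.load chat.md               # load a Markdown file, replacing the current chat
.load -f -m json chat.json  # append records from a JSON file to the current chat
```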

@@ -80,7 +81,7 @@ $ docker run --rm -it -v $PWD/config.json:/gptcli/config.json --network host gpt
- [x] Multiline input
- [x] Stream output
- [x] Single Python script
- [x] Save and load session from file
- [x] Save and load session from file (Markdown/JSON)

# LINK

56 changes: 39 additions & 17 deletions gptcli.py
@@ -19,6 +19,7 @@ class Config:
sep = Markdown("---")
baseDir = os.path.dirname(os.path.realpath(__file__))
default = os.path.join(baseDir, "config.json")
mdSep = '\n\n' + '-' * 10 + '\n'

def __init__(self, file=None) -> None:
self.cfg = {}
@@ -28,13 +29,14 @@ def __init__(self, file=None) -> None:
def load(self, file):
with open(file, "r") as f:
self.cfg = json.load(f)
self.key = self.cfg.get("key", openai.api_key)
self.api_base = self.cfg.get("api_base", openai.api_base)
self.model = self.cfg.get("model", "gpt-3.5-turbo")
self.prompt = self.cfg.get("prompt", [])
self.stream = self.cfg.get("stream", False)
self.response = self.cfg.get("response", False)
self.proxy = self.cfg.get("proxy", "")
c = self.cfg
self.api_key = c.get("api_key", c.get("key", openai.api_key))  # backward compatible with the legacy "key" field
self.api_base = c.get("api_base", openai.api_base)
self.model = c.get("model", "gpt-3.5-turbo")
self.prompt = c.get("prompt", [])
self.stream = c.get("stream", False)
self.response = c.get("response", False)
self.proxy = c.get("proxy", "")
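
The renamed `api_key` field falls back to the old `key` field, so existing config files keep working. A tiny sketch of that lookup with made-up values:

```python
# Placeholder values, for illustration only.
old_style = {"key": "sk-legacy"}
new_style = {"api_key": "sk-current"}

for c in (old_style, new_style):
    # Same fallback chain as Config.load above.
    print(c.get("api_key", c.get("key", None)))
# -> sk-legacy
# -> sk-current
```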

def get(self, key, default=None):
return self.cfg.get(key, default)
@@ -56,7 +58,7 @@ def __init__(self, config):
self.session = []
# Init config
self.config = Config(config)
self.api_key = self.config.key
self.api_key = self.config.api_key
self.api_base = self.config.api_base
self.api_model = self.config.model
self.api_prompt = self.config.prompt
@@ -114,15 +116,29 @@ def handle_input(self, content: str):
elif self.api_response:
self.session.append({"role": "assistant", "content": answer})

def load_session(self, file):
def load_session(self, file, mode="md", append=False):
if not append:
self.session.clear()
with open(file, "r") as f:
self.session = json.load(f)
data = f.read()
if mode == "json":
self.session.extend(json.loads(data))
elif mode == "md":
for chat in data.split(Config.mdSep):
role, content = chat.split(": ", 1)
self.session.append({"role": role, "content": content})
self.print("Load {} records from {}".format(len(self.session), file))

def save_session(self, file):
def save_session(self, file, mode="md"):
self.print("Save {} records to {}".format(len(self.session), file))
with open(file, "w") as f:
json.dump(self.session, f, indent=2)
if mode == "json":
with open(file, "w") as f:
json.dump(self.session, f, indent=2)
elif mode == "md":
chats = ["{}: {}".format(chat["role"], chat["content"])
for chat in self.session]
with open(file, "w") as f:
f.write(Config.mdSep.join(chats))
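
A standalone sketch of the Markdown round-trip that save_session and load_session implement above; the sample messages are made up for illustration:

```python
md_sep = '\n\n' + '-' * 10 + '\n'   # same layout as Config.mdSep

session = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi, how can I help?"},
]

# Save: each record becomes "role: content", records joined by the separator.
text = md_sep.join("{}: {}".format(c["role"], c["content"]) for c in session)

# Load: split on the separator, then split each chunk on the first ": ".
restored = [{"role": r, "content": c}
            for r, c in (chat.split(": ", 1) for chat in text.split(md_sep))]

assert restored == session
```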

def query_openai(self, data: dict) -> str:
messages = []
@@ -196,20 +212,26 @@ def do_reset(self, args):
self.print("session cleared.")

parser_save = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_save.add_argument("-m", dest="mode", choices=["json", "md"],
default="md", help="save as json or markdown (default: md)")
parser_save.add_argument("file", help="target file to save",
completer=cmd2.Cmd.path_complete)
@with_argparser(parser_save)
def do_save(self, args: Namespace):
"Save current conversation to file"
self.save_session(args.file)
"Save current conversation to Markdown/JSON file"
self.save_session(args.file, args.mode)

parser_load = argparse_custom.DEFAULT_ARGUMENT_PARSER()
parser_load.add_argument("-f", dest="append", action="store_true",
help="append to current chat, by default current chat will be cleared")
parser_load.add_argument("-m", dest="mode", choices=["json", "md"],
default="md", help="load as json or markdown (default: md)")
parser_load.add_argument("file", help="target file to load",
completer=cmd2.Cmd.path_complete)
@with_argparser(parser_load)
def do_load(self, args: Namespace):
"Load conversation from file"
self.load_session(args.file)
"Load conversation from Markdown/JSON file"
self.load_session(args.file, args.mode, args.append)

def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)