Skip to content

Commit 4632050

Browse files
committed
feat: added automatic naming of conversations
1 parent a48bc20 commit 4632050

File tree

5 files changed

+65
-15
lines changed

5 files changed

+65
-15
lines changed

gptme/commands.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from time import sleep
55
from typing import Generator, Literal
66

7+
from . import llm
78
from .constants import CMDFIX
89
from .logmanager import LogManager
910
from .message import (
@@ -92,10 +93,22 @@ def handle_cmd(
9293
log.print(show_hidden="--hidden" in args)
9394
case "rename":
9495
log.undo(1, quiet=True)
96+
log.write()
9597
# rename the conversation
98+
print("Renaming conversation (enter 'auto' to generate a name)")
9699
new_name = args[0] if args else input("New name: ")
97-
log.rename(new_name)
98-
print(f"Renamed conversation to {new_name}")
100+
if new_name == "auto":
101+
new_name = llm.generate_name(log.prepare_messages())
102+
assert " " not in new_name
103+
print(f"Generated name: {new_name}")
104+
confirm = input("Confirm? [y/N] ")
105+
if confirm.lower() not in ["y", "yes"]:
106+
print("Aborting")
107+
return
108+
log.rename(new_name, keep_date=True)
109+
else:
110+
log.rename(new_name, keep_date=False)
111+
print(f"Renamed conversation to {log.logfile.parent}")
99112
case "fork":
100113
# fork the conversation
101114
new_name = args[0] if args else input("New name: ")

gptme/llm.py

Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,17 @@ def reply(messages: list[Message], model: str, stream: bool = False) -> Message:
5858
return Message("assistant", response)
5959

6060

61+
def _complete(prompt: str, model: str) -> str:
    """
    Request a plain (non-chat) completion from the OpenAI API.

    Uses the module-level `temperature` and `top_p` sampling parameters
    (shared with `_chat_complete`) and returns the raw completion text.
    """
    # NOTE(review): removed a stray debug `print(prompt)` that dumped the
    # entire prompt to stdout on every call.
    response = openai.Completion.create(  # type: ignore
        model=model,
        prompt=prompt,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].text
70+
71+
6172
def _chat_complete(messages: list[Message], model: str) -> str:
6273
# This will generate code and such, so we need appropriate temperature and top_p params
6374
# top_p controls diversity, temperature controls randomness
@@ -138,3 +149,32 @@ def summarize(content: str) -> str:
138149
+ summary
139150
)
140151
return summary
152+
153+
154+
def generate_name(msgs: list[Message]) -> str:
    """
    Generate a short dash-separated name for a conversation using a LLM.

    System messages are stripped before the conversation is sent, then the
    model is instructed to reply with a 2-5 word dashed slug describing it.
    """
    # filter out system messages
    msgs = [m for m in msgs if m.role != "system"]
    # sandwich the conversation between the naming instructions and the request
    msgs = (
        [
            Message(
                "system",
                """
The following is a conversation between a user and an assistant. Which we will generate a name for.

The name should be 2-5 words describing the conversation, separated by dashes. Examples:
- install-llama
- implement-game-of-life
- capitalize-words-in-python
""",
            )
        ]
        + msgs
        + [Message("user", "Now, generate a name for this conversation.")]
    )
    name = _chat_complete(msgs, model="gpt-3.5-turbo").strip()
    # NOTE(review): dropped a duplicate debug `print(name)` and folded the
    # `f"..." + name` concatenation into a single f-string (same output).
    print(f"Generated name for conversation: {name}")
    return name

gptme/logmanager.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,14 @@ def get_last_code_block(self) -> str | None:
132132
return msg.content.split("```")[-2].split("\n", 1)[-1]
133133
return None
134134

135-
def rename(self, name: str) -> None:
136-
# rename the conversation and log file
137-
# if you want to keep the old log, use fork()
135+
def rename(self, name: str, keep_date: bool = False) -> None:
    """
    Rename the conversation and its log file.

    If keep_date is True, the date prefix of the current log directory name
    is preserved ("2021-08-01-some-name").
    If you want to keep the old log, use fork().
    """
    if keep_date:
        # the first 10 chars of the dir name are the date ("YYYY-MM-DD")
        name = f"{self.logfile.parent.name[:10]}-{name}"
    (LOGSDIR / name).mkdir(parents=True, exist_ok=True)
    self.logfile.rename(LOGSDIR / name / "conversation.jsonl")
    self.logfile = LOGSDIR / name / "conversation.jsonl"

gptme/message.py

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ def __init__(
2020
self,
2121
role: Literal["system", "user", "assistant"],
2222
content: str,
23-
user: str | None = None,
2423
pinned: bool = False,
2524
hide: bool = False,
2625
quiet: bool = False,
@@ -33,11 +32,6 @@ def __init__(
3332
self.timestamp = datetime.fromisoformat(timestamp)
3433
else:
3534
self.timestamp = timestamp or datetime.now()
36-
if user:
37-
self.user = user
38-
else:
39-
role_names = {"system": "System", "user": "User", "assistant": "Assistant"}
40-
self.user = role_names[role]
4135

4236
# Wether this message should be pinned to the top of the chat, and never context-trimmed.
4337
self.pinned = pinned
@@ -74,7 +68,7 @@ def format_msgs(
7468
outputs = []
7569
for msg in msgs:
7670
color = ROLE_COLOR[msg.role]
77-
userprefix = f"[bold {color}]{msg.user}[/bold {color}]"
71+
userprefix = f"[bold {color}]{msg.role.capitalize()}[/bold {color}]"
7872
# get terminal width
7973
max_len = shutil.get_terminal_size().columns - len(userprefix)
8074
output = ""
@@ -174,7 +168,6 @@ def toml_to_msg(toml: str) -> Message:
174168
return Message(
175169
msg["role"],
176170
msg["content"],
177-
user=msg.get("user"),
178171
pinned=msg.get("pinned", False),
179172
hide=msg.get("hide", False),
180173
quiet=msg.get("quiet", False),
@@ -196,7 +189,6 @@ def toml_to_msgs(toml: str) -> list[Message]:
196189
Message(
197190
msg["role"],
198191
msg["content"],
199-
user=msg.get("user"),
200192
pinned=msg.get("pinned", False),
201193
hide=msg.get("hide", False),
202194
quiet=msg.get("quiet", False),

gptme/util.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def len_tokens_approx(content: str | list[Message]) -> int:
4444
def msgs2text(msgs: list[Message]) -> str:
    """Render a list of messages as plain text, one "Role: content" paragraph each."""
    return "".join(f"{m.role.capitalize()}: {m.content}\n\n" for m in msgs)
4949

5050

0 commit comments

Comments
 (0)