-
Notifications
You must be signed in to change notification settings - Fork 30
/
try-llm.py
68 lines (50 loc) · 1.75 KB
/
try-llm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
"""
This example shows how to use Langroid to interact directly with an OpenAI GPT chat model,
i.e., without wrapping it in an Agent.
Run as follows:
python3 examples/quick-start/try-llm.py
For more explanation see the
[Getting Started guide](https://langroid.github.io/langroid/quick-start/llm-interaction/)
"""
import typer
from rich import print
from rich.prompt import Prompt
import langroid as lr
# Convenience aliases for langroid's LLM message types used below.
Role = lr.language_models.Role
LLMMessage = lr.language_models.LLMMessage
# Typer application object providing the command-line interface.
app = typer.Typer()
def chat() -> None:
    """Run an interactive REPL that sends user messages directly to an
    OpenAI GPT chat model (no Agent wrapper).

    The full message history is accumulated and re-sent on every turn so
    the model retains conversational context. Enter ``x`` or ``q`` to quit.
    """
    print("[blue]Welcome to langroid!")
    cfg = lr.language_models.OpenAIGPTConfig(
        chat_model=lr.language_models.OpenAIChatModel.GPT4,
    )
    mdl = lr.language_models.OpenAIGPT(cfg)
    # Seed the conversation with a system message.
    # (Fixed typo in prompt text: "assitant" -> "assistant".)
    messages = [
        LLMMessage(role=Role.SYSTEM, content="You are a helpful assistant"),
    ]
    while True:
        message = Prompt.ask("[blue]Human")
        if message in ["x", "q"]:
            print("[magenta]Bye!")
            break
        messages.append(LLMMessage(role=Role.USER, content=message))
        # use the OpenAI ChatCompletion API to generate a response
        response = mdl.chat(messages=messages, max_tokens=200)
        # Append the assistant's reply so later turns keep full context.
        messages.append(response.to_LLMMessage())
        print("[green]Bot: " + response.message)
@app.command()
def main(
    debug: bool = typer.Option(False, "--debug", "-d", help="debug mode"),
    no_stream: bool = typer.Option(False, "--nostream", "-ns", help="no streaming"),
    nocache: bool = typer.Option(False, "--nocache", "-nc", help="don't use cache"),
) -> None:
    """CLI entry point: apply global langroid settings, then start the chat loop."""
    settings = lr.utils.configuration.Settings(
        debug=debug,
        cache=not nocache,
        stream=not no_stream,
    )
    lr.utils.configuration.set_global(settings)
    chat()
# Launch the Typer CLI only when executed as a script (not on import).
if __name__ == "__main__":
    app()