chatapi.py
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage


def main():
    print("hello chatapi...")

    # Single HumanMessage: ask the chat model a plain question
    chat = ChatOpenAI()
    query = "What is the name of the most populous state in the USA?"
    messages = [HumanMessage(content=query)]
    resp = chat(messages)
    print(f"query is {query}")
    print(f"resp is {resp.content}")

    # SystemMessage + HumanMessage: the system message steers how the model replies
    querySystemMessage = "Say the opposite of what the user says"
    queryHumanMessage = "I love programming."
    messages = [
        SystemMessage(content=querySystemMessage),
        HumanMessage(content=queryHumanMessage),
    ]
    resp = chat(messages)
    print(f"system is {querySystemMessage}")
    print(f"human is {queryHumanMessage}")
    print(f"resp is {resp.content}")
    # Include a prior AI turn so the model has conversation history to work from
    messages = [
        SystemMessage(content="Say the opposite of what the user says"),
        HumanMessage(content="I love programming."),
        AIMessage(content="I hate programming."),
        HumanMessage(content="The moon is out"),
    ]
    print(chat(messages).content)

    # The model can also refer back to earlier messages in the same list
    messages = [
        SystemMessage(content="Say the opposite of what the user says"),
        HumanMessage(content="I love programming."),
        AIMessage(content="I hate programming."),
        HumanMessage(content="What is the first thing that I said?"),
    ]
    print(chat(messages).content)
    # Batch several independent message lists in a single generate() call
    batch_messages = [
        [
            SystemMessage(
                content="You are a helpful word machine that creates an alliteration using a base word"
            ),
            HumanMessage(content="Base word: Apple"),
        ],
        [
            SystemMessage(
                content="You are a helpful word machine that creates an alliteration using a base word"
            ),
            HumanMessage(content="Base word: Dog"),
        ],
    ]
    batch_resp = chat.generate(batch_messages)
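    # A minimal sketch of reading the batch result, assuming this langchain
    # version's generate() returns an LLMResult with one generation list per input.
    for gens in batch_resp.generations:
        print(gens[0].text)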
    # Make SystemMessagePromptTemplate
    prompt = PromptTemplate(
        template="Propose creative ways to incorporate {food_1} and {food_2} in the cuisine of the user's choice.",
        input_variables=["food_1", "food_2"],
    )
    system_message_prompt = SystemMessagePromptTemplate(prompt=prompt)

    # Output of system_message_prompt
    print(system_message_prompt.format(food_1="Bacon", food_2="Shrimp"))

    # Make HumanMessagePromptTemplate
    human_template = "{text}"
    human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

    # Create ChatPromptTemplate: combine System + Human
    chat_prompt = ChatPromptTemplate.from_messages(
        [system_message_prompt, human_message_prompt]
    )
    chat_prompt_with_values = chat_prompt.format_prompt(
        food_1="Bacon", food_2="Shrimp", text="I really like food from Germany."
    )
    print(chat_prompt_with_values.to_messages())

    response = chat(chat_prompt_with_values.to_messages()).content
    print(f"response is: {response}\n\n")
    # Streaming: tokens are written to stdout as they are generated
    chat = ChatOpenAI(
        streaming=True,
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
        verbose=True,
        temperature=0,
    )
    resp = chat(chat_prompt_with_values.to_messages())
    print(f"\nresp is: {resp.content}")


if __name__ == "__main__":
    load_dotenv()
    main()