-
Notifications
You must be signed in to change notification settings - Fork 1.9k
/
overview.ts
141 lines (112 loc) Β· 4.23 KB
/
overview.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import { AgentExecutor, ChatAgent } from "langchain/agents";
import { ConversationChain, LLMChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import { BufferMemory } from "langchain/memory";
import {
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
} from "@langchain/core/prompts";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
import { SerpAPI } from "@langchain/community/tools/serpapi";
/**
 * Walkthrough of core LangChain.js concepts with a chat model:
 * single/multi-message calls, parallel calls, prompt templates,
 * chains, conversational memory, and finally tools + agents.
 *
 * Requires OPENAI_API_KEY (and SERPAPI_API_KEY for the agent section)
 * in the environment. Each step logs its response to the console.
 */
export const run = async () => {
  // temperature 0 => deterministic-as-possible completions for the demo
  const chat = new ChatOpenAI({ temperature: 0 });

  // Sending one message to the chat model, receiving one message back
  let response = await chat.invoke([
    new HumanMessage(
      "Translate this sentence from English to French. I love programming."
    ),
  ]);
  console.log(response);

  // Sending an input made up of two messages (system + human) to the chat model
  response = await chat.invoke([
    new SystemMessage(
      "You are a helpful assistant that translates English to French."
    ),
    new HumanMessage("Translate: I love programming."),
  ]);
  console.log(response);

  // Sending two separate prompts in parallel, receiving two responses back.
  // Note: these must be two independent invoke() calls — putting all four
  // messages into one call would send a single combined conversation and
  // return only one response.
  const responseA = await Promise.all([
    chat.invoke([
      new SystemMessage(
        "You are a helpful assistant that translates English to French."
      ),
      new HumanMessage(
        "Translate this sentence from English to French. I love programming."
      ),
    ]),
    chat.invoke([
      new SystemMessage(
        "You are a helpful assistant that translates English to French."
      ),
      new HumanMessage(
        "Translate this sentence from English to French. I love artificial intelligence."
      ),
    ]),
  ]);
  console.log(responseA);

  // Using ChatPromptTemplate to encapsulate the reusable parts of the prompt
  const translatePrompt = ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(
      "You are a helpful assistant that translates {input_language} to {output_language}."
    ),
    HumanMessagePromptTemplate.fromTemplate("{text}"),
  ]);
  const responseB = await chat.invoke(
    await translatePrompt.formatPromptValue({
      input_language: "English",
      output_language: "French",
      text: "I love programming.",
    })
  );
  console.log(responseB);

  // This pattern of asking for the completion of a formatted prompt is quite
  // common, so we introduce the next piece of the puzzle: LLMChain
  const translateChain = new LLMChain({
    prompt: translatePrompt,
    llm: chat,
  });
  const responseC = await translateChain.invoke({
    input_language: "English",
    output_language: "French",
    text: "I love programming.",
  });
  console.log(responseC);

  // Next up, stateful chains that remember the conversation history.
  // MessagesPlaceholder("history") is where BufferMemory injects prior turns.
  const chatPrompt = ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(
      "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know."
    ),
    new MessagesPlaceholder("history"),
    HumanMessagePromptTemplate.fromTemplate("{input}"),
  ]);
  const chain = new ConversationChain({
    // returnMessages: true keeps history as message objects (not one string),
    // matching what MessagesPlaceholder expects
    memory: new BufferMemory({ returnMessages: true }),
    prompt: chatPrompt,
    llm: chat,
  });
  const responseE = await chain.invoke({
    input: "hi from London, how are you doing today",
  });
  console.log(responseE);
  // The second turn relies on the memory retaining "London" from the first
  const responseF = await chain.invoke({
    input: "Do you know where I am?",
  });
  console.log(responseF);

  // Finally, we introduce Tools and Agents, which extend the model with
  // other abilities, such as search, or a calculator

  // Define the list of tools the agent can use
  const tools = [
    new SerpAPI(process.env.SERPAPI_API_KEY, {
      location: "Austin,Texas,United States",
      hl: "en",
      gl: "us",
    }),
  ];
  // Create the agent from the chat model and the tools
  const agent = ChatAgent.fromLLMAndTools(new ChatOpenAI(), tools);
  // Create an executor, which calls to the agent until an answer is found
  const executor = AgentExecutor.fromAgentAndTools({ agent, tools });
  const responseG = await executor.invoke({
    input: "How many people live in canada as of 2023?",
  });
  console.log(responseG);
};