-
Notifications
You must be signed in to change notification settings - Fork 2.1k
/
multi_prompt.ts
169 lines (159 loc) Β· 5.75 KB
/
multi_prompt.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
import { z } from "zod";
import { interpolateFString, PromptTemplate } from "@langchain/core/prompts";
import { MultiRouteChain, MultiRouteChainInput } from "./multi_route.js";
import { STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE } from "./multi_prompt_prompt.js";
import { BaseChain } from "../../chains/base.js";
import { LLMChain, LLMChainInput } from "../../chains/llm_chain.js";
import { LLMRouterChain } from "./llm_router.js";
import { ConversationChain } from "../../chains/conversation.js";
import { zipEntries } from "./utils.js";
import { RouterOutputParser } from "../../output_parsers/router.js";
/**
 * A multi-prompt routing chain: an LLM-backed router inspects the input
 * and dispatches it to one of several prompt-specific destination chains
 * (falling back to a default conversation chain when no destination fits).
 * Extends {@link MultiRouteChain}.
 * @example
 * ```typescript
 * const multiPromptChain = MultiPromptChain.fromLLMAndPrompts(new ChatOpenAI(), {
 *   promptNames: ["physics", "math", "history"],
 *   promptDescriptions: [
 *     "Good for answering questions about physics",
 *     "Good for answering math questions",
 *     "Good for answering questions about history",
 *   ],
 *   promptTemplates: [
 *     `You are a very smart physics professor. Here is a question:\n{input}\n`,
 *     `You are a very good mathematician. Here is a question:\n{input}\n`,
 *     `You are a very smart history professor. Here is a question:\n{input}\n`,
 *   ],
 * });
 * const result = await multiPromptChain.call({
 *   input: "What is the speed of light?",
 * });
 * ```
 */
export class MultiPromptChain extends MultiRouteChain {
  /**
   * @deprecated Use `fromLLMAndPrompts` instead
   */
  static fromPrompts(
    llm: BaseLanguageModelInterface,
    promptNames: string[],
    promptDescriptions: string[],
    promptTemplates: string[] | PromptTemplate[],
    defaultChain?: BaseChain,
    options?: Omit<MultiRouteChainInput, "defaultChain">
  ) {
    // Thin forwarding shim kept for backward compatibility with the old
    // positional-argument API.
    return MultiPromptChain.fromLLMAndPrompts(llm, {
      promptNames,
      promptDescriptions,
      promptTemplates,
      defaultChain,
      multiRouteChainOpts: options,
    });
  }

  /**
   * Builds a MultiPromptChain from a language model and a parallel set of
   * prompt names / descriptions / templates.
   * @param llm A BaseLanguageModel instance used for both routing and the destination chains.
   * @param promptNames An array of prompt names (the router's destination labels).
   * @param promptDescriptions An array of prompt descriptions shown to the router.
   * @param promptTemplates An array of prompt templates (raw f-strings or PromptTemplate objects).
   * @param defaultChain An optional BaseChain used when the router picks no destination.
   * @param llmChainOpts Optional LLMChainInput parameters, excluding 'llm' and 'prompt'.
   * @param conversationChainOpts Optional LLMChainInput parameters, excluding 'llm' and 'outputKey'.
   * @param multiRouteChainOpts Optional MultiRouteChainInput parameters, excluding 'defaultChain'.
   * @returns An instance of MultiPromptChain.
   */
  static fromLLMAndPrompts(
    llm: BaseLanguageModelInterface,
    {
      promptNames,
      promptDescriptions,
      promptTemplates,
      defaultChain,
      llmChainOpts,
      conversationChainOpts,
      multiRouteChainOpts,
    }: {
      promptNames: string[];
      promptDescriptions: string[];
      promptTemplates: string[] | PromptTemplate[];
      defaultChain?: BaseChain;
      llmChainOpts?: Omit<LLMChainInput, "llm" | "prompt">;
      conversationChainOpts?: Omit<LLMChainInput, "llm" | "outputKey">;
      multiRouteChainOpts?: Omit<MultiRouteChainInput, "defaultChain">;
    }
  ): MultiPromptChain {
    // Schema the router's LLM output must satisfy: a destination label
    // plus the (possibly rewritten) input for the chosen chain.
    const routerSchema = z.object({
      destination: z
        .string()
        .optional()
        .describe('name of the question answering system to use or "DEFAULT"'),
      next_inputs: z
        .object({
          input: z
            .string()
            .describe("a potentially modified version of the original input"),
        })
        .describe("input to be fed to the next model"),
    });
    const outputParser = new RouterOutputParser(routerSchema);

    // One "name: description" line per destination, listed in the router prompt.
    const destinationLines = zipEntries(promptNames, promptDescriptions).map(
      ([name, description]) => `${name}: ${description}`
    );
    const routerTemplate = interpolateFString(
      STRUCTURED_MULTI_PROMPT_ROUTER_TEMPLATE(
        outputParser.getFormatInstructions({ interpolationDepth: 4 })
      ),
      {
        destinations: destinationLines.join("\n"),
      }
    );
    const routerChain = LLMRouterChain.fromLLM(
      llm,
      new PromptTemplate({
        template: routerTemplate,
        inputVariables: ["input"],
        outputParser,
      })
    );

    // Build one LLMChain per destination, coercing raw template strings
    // into PromptTemplates along the way.
    const destinationChains: { [name: string]: LLMChain } = {};
    for (const [name, template] of zipEntries<
      [string, string | PromptTemplate]
    >(promptNames, promptTemplates)) {
      let destinationPrompt: string | PromptTemplate;
      if (typeof template === "object") {
        destinationPrompt = template;
      } else if (typeof template === "string") {
        destinationPrompt = new PromptTemplate({
          template: template as string,
          inputVariables: ["input"],
        });
      } else {
        throw new Error("Invalid prompt template");
      }
      destinationChains[name as string] = new LLMChain({
        ...llmChainOpts,
        llm,
        prompt: destinationPrompt,
      });
    }

    // Fallback used when the caller supplies no explicit default chain.
    const fallbackChain = new ConversationChain({
      ...conversationChainOpts,
      llm,
      outputKey: "text",
    });
    return new MultiPromptChain({
      ...multiRouteChainOpts,
      routerChain,
      destinationChains,
      defaultChain: defaultChain ?? fallbackChain,
    });
  }

  _chainType(): string {
    return "multi_prompt_chain";
  }
}