-
Notifications
You must be signed in to change notification settings - Fork 7
/
backend.ts
75 lines (65 loc) · 2.63 KB
/
backend.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import { GenezioDeploy } from "@genezio/types";
import { LanceDB } from "@langchain/community/vectorstores/lancedb";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { connect } from "vectordb";
import {
RunnableLambda,
RunnableMap,
RunnablePassthrough,
} from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
@GenezioDeploy()
export class BackendService {
  /**
   * Answers a question via retrieval-augmented generation (RAG): the most
   * similar chunk is fetched from a local LanceDB vector store and injected
   * as context into a GPT-4 prompt; the model's reply is returned as text.
   *
   * @param question - The user's question in plain text.
   * @returns The model's answer as a string.
   * @throws Error when the OPENAI_API_KEY environment variable is not set.
   */
  async ask(question: string): Promise<string> {
    // Fail fast with an actionable message if the key is missing, rather
    // than letting the OpenAI client error later with something opaque.
    const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
    if (!OPENAI_API_KEY) {
      throw new Error(
        "You need to provide an OpenAI API key. Go to https://platform.openai.com/account/api-keys and save it in a `.env` file.",
      );
    }

    // Define the OpenAI model. Temperature 0.5 trades off determinism vs.
    // fluency; verbose logging helps during development.
    const model = new OpenAI({
      modelName: "gpt-4",
      openAIApiKey: OPENAI_API_KEY,
      temperature: 0.5,
      verbose: true,
    });

    // Prompt template: retrieved context goes into the "ai" message and the
    // raw question into the "human" message.
    const prompt = ChatPromptTemplate.fromMessages([
      [
        "ai",
        `Answer the question based on only the following context. If the information is not in the context, use your previous knowledge to answer the question.
{context}`,
      ],
      ["human", "{question}"],
    ]);

    // Connect to the local LanceDB database and open the embeddings table.
    const database = "./lancedb";
    const db = await connect(database);
    const table = await db.openTable("vectors");

    // Wrap the table in a LangChain vector store and retrieve only the
    // single most similar document (k = 1).
    const vectorStore = new LanceDB(new OpenAIEmbeddings(), { table });
    const retriever = vectorStore.asRetriever(1);

    // Converts the model's raw output into a plain string.
    const outputParser = new StringOutputParser();

    // Build the prompt's input map:
    //  - context: best-matching chunk text, or "" when the store returns no
    //    documents (previously response[0].pageContent threw a TypeError on
    //    an empty result set)
    //  - question: passed through unchanged
    const setupAndRetrieval = RunnableMap.from({
      context: new RunnableLambda({
        func: async (input: string) => {
          const docs = await retriever.invoke(input);
          // Guard against an empty vector store / no match: fall back to an
          // empty context so the model answers from its own knowledge, as
          // the prompt already instructs it to do.
          return docs.length > 0 ? docs[0].pageContent : "";
        },
      }).withConfig({ runName: "contextRetriever" }),
      question: new RunnablePassthrough(),
    });

    // Compose the chain: retrieval map -> prompt -> model -> string parser.
    const chain = setupAndRetrieval.pipe(prompt).pipe(model).pipe(outputParser);

    // Invoke the chain and log the answer for observability.
    const response = await chain.invoke(question);
    console.log("Answer:", response);
    return response;
  }
}