"""
RAG Fusion sample
"""
import argparse
import os
import sys
from operator import itemgetter
from dotenv import load_dotenv
from langchain.load import dumps, loads
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from langchain_core.documents.base import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
# LLM model
LLM_MODEL_OPENAI = "gpt-3.5-turbo"
EMBEDDING_MODEL = "text-embedding-3-small"
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--query', help='Query with RRF search')
parser.add_argument('-r', '--retriever', help='Retrieve with RRF retriever')
parser.add_argument('-v', '--vector', help='Query with vector search')
# Retriever options
TOP_K = 5  # documents retrieved per generated query
MAX_DOCS_FOR_CONTEXT = 8  # maximum number of documents passed to the answer context
# Target document: Hokuriku Shinkansen article on Japanese Wikipedia
DOCUMENT_URL = "https://ja.wikipedia.org/wiki/%E5%8C%97%E9%99%B8%E6%96%B0%E5%B9%B9%E7%B7%9A"
# .env (load_dotenv() reads OPENAI_API_KEY from a .env file, if present)
load_dotenv()
# Template
my_template_jp = """Please answer the [question] in Japanese, using only the [information] below. If the [information] is not sufficient to answer the question, do not force an answer.
Information: {context}
Question: {question}
Final answer:"""

def load_and_split_document(url: str) -> list[Document]:
    """Load a web document and split it into chunks.

    Args:
        url (str): Document URL

    Returns:
        list[Document]: split documents
    """
    # Read the web document from 'url'
    raw_documents = WebBaseLoader(url).load()
    # Define the chunking strategy
    text_splitter = TokenTextSplitter(chunk_size=2048, chunk_overlap=24)
    # Split the documents
    documents = text_splitter.split_documents(raw_documents)
    # For debugging
    print("Original document: ", len(documents), " docs")
    return documents

def create_retriever(search_type: str, kwargs: dict) -> BaseRetriever:
    """Create a Chroma vector-store retriever over the target document.

    Args:
        search_type (str): search type (e.g. "similarity")
        kwargs (dict): search kwargs for the retriever (e.g. {"k": TOP_K})

    Returns:
        BaseRetriever: retriever
    """
# load and split document
documents = load_and_split_document(DOCUMENT_URL)
# chroma db
embeddings = OpenAIEmbeddings(model=EMBEDDING_MODEL)
vectordb = Chroma.from_documents(documents, embeddings)
# retriever
retriever = vectordb.as_retriever(
search_type=search_type,
search_kwargs=kwargs,
)
return retriever
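# Usage (as in rrf_retriever() and main() below):
#   retriever = create_retriever(search_type="similarity", kwargs={"k": TOP_K})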

def reciprocal_rank_fusion(results: list[list], k: int = 60):
    """Rerank documents with Reciprocal Rank Fusion (RRF).

    Args:
        results (list[list]): lists of retrieved documents, one ranked list per query
        k (int, optional): RRF smoothing constant. Defaults to 60.

    Returns:
        list[Document]: top documents reranked by fused RRF score
    """
fused_scores = {}
for docs in results:
# Assumes the docs are returned in sorted order of relevance
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
            fused_scores[doc_str] += 1 / (rank + k)  # each ranked list contributes 1/(rank + k)
reranked_results = [
(loads(doc), score)
for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
]
    # For debugging (print reranked documents and scores)
print("Reranked documents: ", len(reranked_results))
for doc in reranked_results:
print('---')
print('Docs: ', ' '.join(doc[0].page_content[:100].split()))
print('RRF score: ', doc[1])
# return only documents
return [x[0] for x in reranked_results[:MAX_DOCS_FOR_CONTEXT]]
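# Worked example (hypothetical numbers): with k=60, a document ranked 1st
# (rank 0) in one query's results and 3rd (rank 2) in another's scores
# 1/60 + 1/62 ≈ 0.0328, while a document ranked 1st in only a single list
# scores 1/60 ≈ 0.0167, so documents that rank highly across several
# generated queries float to the top of the fused list.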

def query_generator(original_query: dict) -> list[str]:
    """Generate multiple search queries from the original query.

    Args:
        original_query (dict): original query, e.g. {"query": "..."}

    Returns:
        list[str]: the original query plus the generated queries
    """
# original query
query = original_query.get("query")
# prompt for query generator
prompt = ChatPromptTemplate.from_messages([
("system", "You are a helpful assistant that generates multiple search queries based on a single input query."),
("user", "Generate multiple search queries related to: {original_query}. When creating queries, please refine or add closely related contextual information in Japanese, without significantly altering the original query's meaning"),
("user", "OUTPUT (3 queries):")
])
# LLM model
model = ChatOpenAI(
temperature=0,
model_name=LLM_MODEL_OPENAI
)
# query generator chain
query_generator_chain = (
prompt | model | StrOutputParser() | (lambda x: x.split("\n"))
)
    # Generate queries
queries = query_generator_chain.invoke({"original_query": query})
# add original query
queries.insert(0, "0. " + query)
# for TEST
print('Generated queries:\n', '\n'.join(queries))
return queries
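# Example return value (hypothetical LLM output, split on newlines; the
# original query is prepended with a "0. " prefix):
#   ['0. <original query>', '1. <variant 1>', '2. <variant 2>', '3. <variant 3>']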

def rrf_retriever(query: str) -> list[Document]:
    """RAG-Fusion retriever: generate queries, retrieve per query, fuse with RRF.

    Args:
        query (str): query string

    Returns:
        list[Document]: retrieved documents reranked by RRF
    """
# Retriever
retriever = create_retriever(search_type="similarity", kwargs={"k": TOP_K})
    # RRF chain: fan out to generated queries, retrieve per query, then fuse
    chain = (
        {"query": itemgetter("query")}
        | RunnableLambda(query_generator)  # -> list of query strings
        | retriever.map()                  # -> one ranked list per query
        | reciprocal_rank_fusion           # -> single fused, reranked list
    )
# invoke
result = chain.invoke({"query": query})
return result
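# Note: rrf_retriever() is a plain function; main() wraps it in RunnableLambda
# so it can be piped into the answer chain in query() like any other retriever.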

def query(query: str, retriever: BaseRetriever):
    """Answer a question using documents fetched by the given retriever.

    Args:
        query (str): question string
        retriever (BaseRetriever): retriever that supplies the context

    Returns:
        dict: {"response": answer string, "context": retrieved documents}
    """
# model
model = ChatOpenAI(
temperature=0,
model_name=LLM_MODEL_OPENAI)
# prompt
prompt = PromptTemplate(
template=my_template_jp,
input_variables=["context", "question"],
)
# Query chain
chain = (
{
"context": itemgetter("question") | retriever,
"question": itemgetter("question")
}
        | RunnablePassthrough.assign(
            context=itemgetter("context")  # carry the retrieved context through
        )
| {
"response": prompt | model | StrOutputParser(),
"context": itemgetter("context"),
}
)
# execute chain
result = chain.invoke({"question": query})
return result
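# Example (hypothetical):
#   result = query("<question>", RunnableLambda(rrf_retriever))
#   print(result["response"])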

# main
def main():
    """Entry point: dispatch on the command-line options."""
    # OpenAI API key (unset or empty both abort)
    if not os.environ.get("OPENAI_API_KEY"):
        print("`OPENAI_API_KEY` is not set", file=sys.stderr)
        sys.exit(1)
    # args
    args = parser.parse_args()
    if args.query:
        # Answer with the RAG-Fusion (RRF) retriever
        retriever = RunnableLambda(rrf_retriever)
        result = query(args.query, retriever)
    elif args.retriever:
        # Run the RRF retriever only (it prints the reranked documents)
        rrf_retriever(args.retriever)
        sys.exit(0)
    elif args.vector:
        # Answer with a plain vector-similarity retriever
        retriever = create_retriever(
            search_type="similarity",
            kwargs={"k": MAX_DOCS_FOR_CONTEXT}
        )
        result = query(args.vector, retriever)
    else:
        parser.print_help()
        sys.exit(0)
# print answer
print('---\nAnswer:')
print(result['response'])
if __name__ == '__main__':
main()