bootstraprag/templates/llamaindex/rag_with_react/main.py (3 changes: 2 additions & 1 deletion)
@@ -1,7 +1,8 @@
 # driver code
 from react_agent_with_query_engine import ReActWithQueryEngine
 
-react_with_engine = ReActWithQueryEngine(input_dir='<YOUR_DATA_FOLDER>')
+
+react_with_engine = ReActWithQueryEngine(input_dir='<YOUR_DATA_DIRECTORY>', show_progress=True)
 
 # Start a loop to continually get input from the user
 while True:
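For orientation, here is a minimal sketch of how this driver is typically exercised end to end; the loop body is collapsed in the diff above, so the query call shown here is an assumption, not part of this change:

# Hypothetical driver sketch; only the constructor call above is confirmed by the diff.
from react_agent_with_query_engine import ReActWithQueryEngine

react_with_engine = ReActWithQueryEngine(input_dir='<YOUR_DATA_DIRECTORY>', show_progress=True)

while True:
    user_query = input("Enter your query (or 'exit' to quit): ")
    if user_query.strip().lower() == 'exit':
        break
    # 'query' is an assumed method name; the real call lives in the collapsed part of main.py.
    print(react_with_engine.query(user_query))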
@@ -29,14 +29,15 @@ class ReActWithQueryEngine:
         Response, StreamingResponse, AsyncStreamingResponse, PydanticResponse
     ]
 
-    def __init__(self, input_dir: str, similarity_top_k: int = 3):
+    def __init__(self, input_dir: str, similarity_top_k: int = 3, chunk_size: int = 128, chunk_overlap: int = 100, show_progress: bool = False):
         self.index_loaded = False
         self.similarity_top_k = similarity_top_k
         self.input_dir = input_dir
         self._index = None
         self._engine = None
         self.agent: ReActAgent = None
         self.query_engine_tools = []
+        self.show_progress = show_progress
 
         # use your preferred vector embeddings model
         logger.info("initializing the OllamaEmbedding")
@@ -52,27 +53,32 @@ def __init__(self, input_dir: str, similarity_top_k: int = 3):
logger.info("initializing the global settings")
Settings.embed_model = embed_model
Settings.llm = llm
Settings.chunk_size = chunk_size
Settings.chunk_overlap = chunk_overlap

# Create a local Qdrant vector store
logger.info("initializing the vector store related objects")
client = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
self.vector_store = QdrantVectorStore(client=client, collection_name=os.environ['COLLECTION_NAME'])
self.client: qdrant_client.QdrantClient = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
self.vector_store = QdrantVectorStore(client=self.client, collection_name=os.environ['COLLECTION_NAME'])
self._load_data_and_create_engine()

def _load_data_and_create_engine(self):
try:
self._index = VectorStoreIndex.from_vector_store(vector_store=self.vector_store)
self.index_loaded = True
except Exception as e:
self.index_loaded = False
if self.client.collection_exists(collection_name=os.environ['COLLECTION_NAME']):
try:
self._index = VectorStoreIndex.from_vector_store(vector_store=self.vector_store)
self.index_loaded = True
except Exception as e:
self.index_loaded = False

if not self.index_loaded:
# load data
_docs = SimpleDirectoryReader(input_dir=self.input_dir).load_data()
_docs = SimpleDirectoryReader(input_dir=self.input_dir).load_data(show_progress=self.show_progress)

# build and persist index
self._index = VectorStoreIndex.from_documents(documents=_docs)

storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
logger.info("indexing the docs in VectorStoreIndex")
self._index = VectorStoreIndex.from_documents(documents=_docs, storage_context=storage_context, show_progress=self.show_progress)

self._engine = self._index.as_query_engine(similarity_top_k=self.similarity_top_k)
self._create_query_engine_tools()

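The behavioural core of the hunk above is an attach-or-ingest pattern: reuse the Qdrant collection when it already exists, otherwise read the documents and index them into the same vector store. A condensed standalone sketch of that pattern, assuming the standard LlamaIndex/Qdrant import paths (the env-var names come from the diff, everything else is illustrative):

# Standalone sketch, not part of the PR; assumes llama-index and qdrant-client are installed.
# (The embedding / LLM Settings configured in the surrounding class are omitted for brevity.)
import os
import qdrant_client
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore

client = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
vector_store = QdrantVectorStore(client=client, collection_name=os.environ['COLLECTION_NAME'])

if client.collection_exists(collection_name=os.environ['COLLECTION_NAME']):
    # The collection is already populated: attach to it without re-reading the documents.
    index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
else:
    # First run: read the documents and write their embeddings into the Qdrant collection.
    docs = SimpleDirectoryReader(input_dir='<YOUR_DATA_DIRECTORY>').load_data(show_progress=True)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    index = VectorStoreIndex.from_documents(documents=docs, storage_context=storage_context, show_progress=True)

query_engine = index.as_query_engine(similarity_top_k=3)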