From 3e3e3e3ff18a2231af6f2f8ae96e501747ff9b42 Mon Sep 17 00:00:00 2001 From: jasperan <23caj23@gmail.com> Date: Tue, 25 Mar 2025 05:42:05 +0100 Subject: [PATCH 1/5] feat: added web knowledge base --- agentic_rag/README.md | 2 +- agentic_rag/gradio_app.py | 24 ++++- agentic_rag/local_rag_agent.py | 167 +++++++++++++-------------------- agentic_rag/rag_agent.py | 49 +++++++--- 4 files changed, 122 insertions(+), 120 deletions(-) diff --git a/agentic_rag/README.md b/agentic_rag/README.md index e421048..176e9bb 100644 --- a/agentic_rag/README.md +++ b/agentic_rag/README.md @@ -116,7 +116,7 @@ python main.py The API will be available at `http://localhost:8000`. You can then use the API endpoints as described in the API Endpoints section below. -### 2. Using the Gradio Interface +### 2. Using the Gradio Interface (Recommended) The system provides a user-friendly web interface using Gradio, which allows you to: - Upload and process PDF documents diff --git a/agentic_rag/gradio_app.py b/agentic_rag/gradio_app.py index 6a8f98e..d38aae8 100644 --- a/agentic_rag/gradio_app.py +++ b/agentic_rag/gradio_app.py @@ -91,6 +91,17 @@ def chat(message: str, history: List[List[str]], agent_type: str, use_cot: bool, # Skip analysis for General Knowledge or when using standard chat interface (not CoT) skip_analysis = collection == "General Knowledge" or not use_cot + # Map collection names to actual collection names in vector store + collection_mapping = { + "PDF Collection": "pdf_documents", + "Repository Collection": "repository_documents", + "Web Knowledge Base": "web_documents", + "General Knowledge": "general_knowledge" + } + + # Get the actual collection name + actual_collection = collection_mapping.get(collection, "pdf_documents") + # Parse agent type to determine model and quantization quantization = None model_name = None @@ -365,15 +376,17 @@ def create_interface(): ) with gr.Column(scale=1): standard_collection_dropdown = gr.Dropdown( - choices=["PDF Collection", "Repository Collection", "General Knowledge"], + choices=["PDF Collection", "Repository Collection", "Web Knowledge Base", "General Knowledge"], value="PDF Collection", - label="Knowledge Collection" + label="Select Knowledge Base", + info="Choose which knowledge base to use for answering questions" ) gr.Markdown(""" > **Collection Selection**: > - This interface ALWAYS uses the selected collection without performing query analysis. > - "PDF Collection": Will ALWAYS search the PDF documents regardless of query type. > - "Repository Collection": Will ALWAYS search the repository code regardless of query type. + > - "Web Knowledge Base": Will ALWAYS search the web content regardless of query type. > - "General Knowledge": Will ALWAYS use the model's built-in knowledge without searching collections. """) standard_chatbot = gr.Chatbot(height=400) @@ -393,15 +406,17 @@ def create_interface(): ) with gr.Column(scale=1): cot_collection_dropdown = gr.Dropdown( - choices=["PDF Collection", "Repository Collection", "General Knowledge"], + choices=["PDF Collection", "Repository Collection", "Web Knowledge Base", "General Knowledge"], value="PDF Collection", - label="Knowledge Collection" + label="Select Knowledge Base", + info="Choose which knowledge base to use for answering questions" ) gr.Markdown(""" > **Collection Selection**: > - When a specific collection is selected, the system will ALWAYS use that collection without analysis: > - "PDF Collection": Will ALWAYS search the PDF documents. 
> - "Repository Collection": Will ALWAYS search the repository code. + > - "Web Knowledge Base": Will ALWAYS search the web content. > - "General Knowledge": Will ALWAYS use the model's built-in knowledge. > - This interface shows step-by-step reasoning and may perform query analysis when needed. """) @@ -485,6 +500,7 @@ def create_interface(): - Select which knowledge collection to query: - **PDF Collection**: Always searches PDF documents - **Repository Collection**: Always searches code repositories + - **Web Knowledge Base**: Always searches web content - **General Knowledge**: Uses the model's built-in knowledge without searching collections 3. **Chain of Thought Chat Interface**: diff --git a/agentic_rag/local_rag_agent.py b/agentic_rag/local_rag_agent.py index 4742160..6f68812 100644 --- a/agentic_rag/local_rag_agent.py +++ b/agentic_rag/local_rag_agent.py @@ -245,126 +245,91 @@ def process_query(self, query: str) -> Dict[str, Any]: else: return self._generate_general_response(query) else: - # For PDF or Repository collections, use context-based processing + # For PDF, Repository, or Web collections, use context-based processing if self.use_cot: return self._process_query_with_cot(query) else: return self._process_query_standard(query) def _process_query_with_cot(self, query: str) -> Dict[str, Any]: - """Process query using Chain of Thought reasoning with multiple agents""" - logger.info("Processing query with Chain of Thought reasoning") - - # Get initial context based on selected collection - initial_context = [] - if self.collection == "PDF Collection": - logger.info(f"Retrieving context from PDF Collection for query: '{query}'") - pdf_context = self.vector_store.query_pdf_collection(query) - initial_context.extend(pdf_context) - logger.info(f"Retrieved {len(pdf_context)} chunks from PDF Collection") - # Don't log individual sources to keep console clean - elif self.collection == "Repository Collection": - logger.info(f"Retrieving context from Repository Collection for query: '{query}'") - repo_context = self.vector_store.query_repo_collection(query) - initial_context.extend(repo_context) - logger.info(f"Retrieved {len(repo_context)} chunks from Repository Collection") - # Don't log individual sources to keep console clean - # For General Knowledge, no context is needed - else: - logger.info("Using General Knowledge collection, no context retrieval needed") - + """Process query using Chain of Thought reasoning""" try: - # Step 1: Planning - logger.info("Step 1: Planning") - if not self.agents or "planner" not in self.agents: - logger.warning("No planner agent available, using direct response") - return self._generate_general_response(query) + # Get context based on collection type + if self.collection == "PDF Collection": + context = self.vector_store.query_pdf_collection(query) + elif self.collection == "Repository Collection": + context = self.vector_store.query_repo_collection(query) + elif self.collection == "Web Knowledge Base": + context = self.vector_store.query_web_collection(query) + else: + context = [] - plan = self.agents["planner"].plan(query, initial_context) - logger.info(f"Generated plan:\n{plan}") + # Log number of chunks retrieved + logger.info(f"Retrieved {len(context)} chunks from {self.collection}") - # Step 2: Research each step (if researcher is available) - logger.info("Step 2: Research") - research_results = [] - if self.agents.get("researcher") is not None and initial_context: - for step in plan.split("\n"): - if not step.strip(): - continue - 
step_research = self.agents["researcher"].research(query, step) - research_results.append({"step": step, "findings": step_research}) - # Don't log source indices to keep console clean - logger.info(f"Research for step: {step}") - else: - # If no researcher or no context, use the steps directly - research_results = [{"step": step, "findings": []} for step in plan.split("\n") if step.strip()] - logger.info("No research performed (no researcher agent or no context available)") + # Create agents if not already created + if not self.agents: + self.agents = create_agents(self.llm, self.vector_store) - # Step 3: Reasoning about each step - logger.info("Step 3: Reasoning") - if not self.agents.get("reasoner"): - logger.warning("No reasoner agent available, using direct response") - return self._generate_general_response(query) + # Get planning step + planning_result = self.agents["planner"].plan(query, context) + logger.info("Planning step completed") - reasoning_steps = [] - for result in research_results: - step_reasoning = self.agents["reasoner"].reason( - query, - result["step"], - result["findings"] if result["findings"] else [{"content": "Using general knowledge", "metadata": {"source": "General Knowledge"}}] - ) - reasoning_steps.append(step_reasoning) - # Log just the step, not the full reasoning - logger.info(f"Reasoning for step: {result['step']}") + # Get research step + research_result = self.agents["researcher"].research(query, context) + logger.info("Research step completed") - # Step 4: Synthesize final answer - logger.info("Step 4: Synthesis") - if not self.agents.get("synthesizer"): - logger.warning("No synthesizer agent available, using direct response") - return self._generate_general_response(query) + # Get reasoning step + reasoning_result = self.agents["reasoner"].reason(query, research_result["context"]) + logger.info("Reasoning step completed") - final_answer = self.agents["synthesizer"].synthesize(query, reasoning_steps) - logger.info("Final answer synthesized successfully") + # Get synthesis step + synthesis_result = self.agents["synthesizer"].synthesize( + query, + planning_result["context"], + research_result["context"], + reasoning_result["context"] + ) + logger.info("Synthesis step completed") return { - "answer": final_answer, - "context": initial_context, - "reasoning_steps": reasoning_steps + "answer": synthesis_result["answer"], + "reasoning_steps": [ + planning_result["answer"], + research_result["answer"], + reasoning_result["answer"], + synthesis_result["answer"] + ], + "context": synthesis_result["context"] } + except Exception as e: logger.error(f"Error in CoT processing: {str(e)}") - logger.info("Falling back to general response") - return self._generate_general_response(query) + raise def _process_query_standard(self, query: str) -> Dict[str, Any]: - """Process query using standard approach without Chain of Thought""" - # Initialize context variables - pdf_context = [] - repo_context = [] - - # Get context based on selected collection - if self.collection == "PDF Collection": - logger.info(f"Retrieving context from PDF Collection for query: '{query}'") - pdf_context = self.vector_store.query_pdf_collection(query) - logger.info(f"Retrieved {len(pdf_context)} chunks from PDF Collection") - # Don't log individual sources to keep console clean - elif self.collection == "Repository Collection": - logger.info(f"Retrieving context from Repository Collection for query: '{query}'") - repo_context = self.vector_store.query_repo_collection(query) - 
logger.info(f"Retrieved {len(repo_context)} chunks from Repository Collection") - # Don't log individual sources to keep console clean - - # Combine all context - all_context = pdf_context + repo_context - - # Generate response using context if available, otherwise use general knowledge - if all_context: - logger.info(f"Generating response using {len(all_context)} context chunks") - response = self._generate_response(query, all_context) - else: - logger.info("No context found, using general knowledge") - response = self._generate_general_response(query) - - return response + """Process query using standard RAG approach""" + try: + # Get context based on collection type + if self.collection == "PDF Collection": + context = self.vector_store.query_pdf_collection(query) + elif self.collection == "Repository Collection": + context = self.vector_store.query_repo_collection(query) + elif self.collection == "Web Knowledge Base": + context = self.vector_store.query_web_collection(query) + else: + context = [] + + # Log number of chunks retrieved + logger.info(f"Retrieved {len(context)} chunks from {self.collection}") + + # Generate response using context + response = self._generate_response(query, context) + return response + + except Exception as e: + logger.error(f"Error in standard processing: {str(e)}") + raise def _generate_text(self, prompt: str, max_length: int = 512) -> str: """Generate text using the local model""" @@ -456,7 +421,7 @@ def main(): parser.add_argument("--model", default="mistralai/Mistral-7B-Instruct-v0.2", help="Model to use") parser.add_argument("--quiet", action="store_true", help="Disable verbose logging") parser.add_argument("--use-cot", action="store_true", help="Enable Chain of Thought reasoning") - parser.add_argument("--collection", choices=["PDF Collection", "Repository Collection", "General Knowledge"], + parser.add_argument("--collection", choices=["PDF Collection", "Repository Collection", "General Knowledge", "Web Knowledge Base"], help="Specify which collection to query") parser.add_argument("--skip-analysis", action="store_true", help="Skip query analysis step") parser.add_argument("--verbose", action="store_true", help="Show full content of sources") diff --git a/agentic_rag/rag_agent.py b/agentic_rag/rag_agent.py index 8a47fba..696bcfe 100644 --- a/agentic_rag/rag_agent.py +++ b/agentic_rag/rag_agent.py @@ -78,6 +78,19 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: # Only log content preview at debug level content_preview = chunk["content"][:150] + "..." if len(chunk["content"]) > 150 else chunk["content"] logger.debug(f"Content preview for source [{i+1}]: {content_preview}") + elif self.collection == "Web Knowledge Base": + logger.info(f"Retrieving context from Web Knowledge Base for query: '{query}'") + web_context = self.vector_store.query_web_collection(query) + initial_context.extend(web_context) + logger.info(f"Retrieved {len(web_context)} chunks from Web Knowledge Base") + # Log each chunk with citation number but not full content + for i, chunk in enumerate(web_context): + source = chunk["metadata"].get("source", "Unknown") + title = chunk["metadata"].get("title", "Unknown") + logger.info(f"Source [{i+1}]: {source} (title: {title})") + # Only log content preview at debug level + content_preview = chunk["content"][:150] + "..." 
if len(chunk["content"]) > 150 else chunk["content"] + logger.debug(f"Content preview for source [{i+1}]: {content_preview}") # For General Knowledge, no context is needed else: logger.info("Using General Knowledge collection, no context retrieval needed") @@ -147,16 +160,15 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: def _process_query_standard(self, query: str) -> Dict[str, Any]: """Process query using standard approach without Chain of Thought""" # Initialize context variables - pdf_context = [] - repo_context = [] + context = [] # Get context based on selected collection if self.collection == "PDF Collection": logger.info(f"Retrieving context from PDF Collection for query: '{query}'") - pdf_context = self.vector_store.query_pdf_collection(query) - logger.info(f"Retrieved {len(pdf_context)} chunks from PDF Collection") + context = self.vector_store.query_pdf_collection(query) + logger.info(f"Retrieved {len(context)} chunks from PDF Collection") # Log each chunk with citation number but not full content - for i, chunk in enumerate(pdf_context): + for i, chunk in enumerate(context): source = chunk["metadata"].get("source", "Unknown") pages = chunk["metadata"].get("page_numbers", []) logger.info(f"Source [{i+1}]: {source} (pages: {pages})") @@ -165,24 +177,33 @@ def _process_query_standard(self, query: str) -> Dict[str, Any]: logger.debug(f"Content preview for source [{i+1}]: {content_preview}") elif self.collection == "Repository Collection": logger.info(f"Retrieving context from Repository Collection for query: '{query}'") - repo_context = self.vector_store.query_repo_collection(query) - logger.info(f"Retrieved {len(repo_context)} chunks from Repository Collection") + context = self.vector_store.query_repo_collection(query) + logger.info(f"Retrieved {len(context)} chunks from Repository Collection") # Log each chunk with citation number but not full content - for i, chunk in enumerate(repo_context): + for i, chunk in enumerate(context): source = chunk["metadata"].get("source", "Unknown") file_path = chunk["metadata"].get("file_path", "Unknown") logger.info(f"Source [{i+1}]: {source} (file: {file_path})") # Only log content preview at debug level content_preview = chunk["content"][:150] + "..." if len(chunk["content"]) > 150 else chunk["content"] logger.debug(f"Content preview for source [{i+1}]: {content_preview}") - - # Combine all context - all_context = pdf_context + repo_context + elif self.collection == "Web Knowledge Base": + logger.info(f"Retrieving context from Web Knowledge Base for query: '{query}'") + context = self.vector_store.query_web_collection(query) + logger.info(f"Retrieved {len(context)} chunks from Web Knowledge Base") + # Log each chunk with citation number but not full content + for i, chunk in enumerate(context): + source = chunk["metadata"].get("source", "Unknown") + title = chunk["metadata"].get("title", "Unknown") + logger.info(f"Source [{i+1}]: {source} (title: {title})") + # Only log content preview at debug level + content_preview = chunk["content"][:150] + "..." 
if len(chunk["content"]) > 150 else chunk["content"] + logger.debug(f"Content preview for source [{i+1}]: {content_preview}") # Generate response using context if available, otherwise use general knowledge - if all_context: - logger.info(f"Generating response using {len(all_context)} context chunks") - response = self._generate_response(query, all_context) + if context: + logger.info(f"Generating response using {len(context)} context chunks") + response = self._generate_response(query, context) else: logger.info("No context found, using general knowledge") response = self._generate_general_response(query) From a5a1ad88a6d300c45e4a0b678a8c392ae97ba17a Mon Sep 17 00:00:00 2001 From: jasperan <23caj23@gmail.com> Date: Tue, 25 Mar 2025 19:15:45 +0100 Subject: [PATCH 2/5] fix: cot bug --- agentic_rag/README.md | 1 + agentic_rag/local_rag_agent.py | 81 +++++++++++++++++++++++++--------- agentic_rag/rag_agent.py | 58 +++++++++++++++++------- 3 files changed, 103 insertions(+), 37 deletions(-) diff --git a/agentic_rag/README.md b/agentic_rag/README.md index 176e9bb..4baa02d 100644 --- a/agentic_rag/README.md +++ b/agentic_rag/README.md @@ -119,6 +119,7 @@ The API will be available at `http://localhost:8000`. You can then use the API e ### 2. Using the Gradio Interface (Recommended) The system provides a user-friendly web interface using Gradio, which allows you to: +- Select and pull `ollama` models directly from the interface - Upload and process PDF documents - Process web content from URLs - Chat with your documents using either local or OpenAI models diff --git a/agentic_rag/local_rag_agent.py b/agentic_rag/local_rag_agent.py index 6f68812..3e539ec 100644 --- a/agentic_rag/local_rag_agent.py +++ b/agentic_rag/local_rag_agent.py @@ -272,35 +272,76 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: self.agents = create_agents(self.llm, self.vector_store) # Get planning step - planning_result = self.agents["planner"].plan(query, context) - logger.info("Planning step completed") + try: + planning_result = self.agents["planner"].plan(query, context) + logger.info("Planning step completed") + except Exception as e: + logger.error(f"Error in planning step: {str(e)}") + logger.info("Falling back to general response") + return self._generate_general_response(query) # Get research step - research_result = self.agents["researcher"].research(query, context) - logger.info("Research step completed") + research_results = [] + if self.agents.get("researcher") is not None and context: + for step in planning_result.split("\n"): + if not step.strip(): + continue + try: + step_research = self.agents["researcher"].research(query, step) + # Extract findings from research result + findings = step_research.get("findings", []) if isinstance(step_research, dict) else [] + research_results.append({"step": step, "findings": findings}) + + # Log which sources were used for this step + try: + source_indices = [context.index(finding) + 1 for finding in findings if finding in context] + logger.info(f"Research for step: {step}\nUsing sources: {source_indices}") + except ValueError as ve: + logger.warning(f"Could not find some findings in initial context: {str(ve)}") + except Exception as e: + logger.error(f"Error during research for step '{step}': {str(e)}") + research_results.append({"step": step, "findings": []}) + else: + # If no researcher or no context, use the steps directly + research_results = [{"step": step, "findings": []} for step in planning_result.split("\n") if step.strip()] + logger.info("No 
research performed (no researcher agent or no context available)") # Get reasoning step - reasoning_result = self.agents["reasoner"].reason(query, research_result["context"]) - logger.info("Reasoning step completed") + reasoning_steps = [] + if not self.agents.get("reasoner"): + logger.warning("No reasoner agent available, using direct response") + return self._generate_general_response(query) + + for result in research_results: + try: + step_reasoning = self.agents["reasoner"].reason( + query, + result["step"], + result["findings"] if result["findings"] else [{"content": "Using general knowledge", "metadata": {"source": "General Knowledge"}}] + ) + reasoning_steps.append(step_reasoning) + logger.info(f"Reasoning for step: {result['step']}\n{step_reasoning}") + except Exception as e: + logger.error(f"Error in reasoning for step '{result['step']}': {str(e)}") + reasoning_steps.append(f"Error in reasoning for this step: {str(e)}") # Get synthesis step - synthesis_result = self.agents["synthesizer"].synthesize( - query, - planning_result["context"], - research_result["context"], - reasoning_result["context"] - ) - logger.info("Synthesis step completed") + if not self.agents.get("synthesizer"): + logger.warning("No synthesizer agent available, using direct response") + return self._generate_general_response(query) + + try: + synthesis_result = self.agents["synthesizer"].synthesize(query, reasoning_steps) + logger.info("Synthesis step completed") + except Exception as e: + logger.error(f"Error in synthesis step: {str(e)}") + logger.info("Falling back to general response") + return self._generate_general_response(query) return { "answer": synthesis_result["answer"], - "reasoning_steps": [ - planning_result["answer"], - research_result["answer"], - reasoning_result["answer"], - synthesis_result["answer"] - ], - "context": synthesis_result["context"] + "reasoning_steps": reasoning_steps, + "context": context } except Exception as e: diff --git a/agentic_rag/rag_agent.py b/agentic_rag/rag_agent.py index 696bcfe..5d36a54 100644 --- a/agentic_rag/rag_agent.py +++ b/agentic_rag/rag_agent.py @@ -102,8 +102,13 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: logger.warning("No planner agent available, using direct response") return self._generate_general_response(query) - plan = self.agents["planner"].plan(query, initial_context) - logger.info(f"Generated plan:\n{plan}") + try: + plan = self.agents["planner"].plan(query, initial_context) + logger.info(f"Generated plan:\n{plan}") + except Exception as e: + logger.error(f"Error in planning step: {str(e)}") + logger.info("Falling back to general response") + return self._generate_general_response(query) # Step 2: Research each step (if researcher is available) logger.info("Step 2: Research") @@ -112,11 +117,21 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: for step in plan.split("\n"): if not step.strip(): continue - step_research = self.agents["researcher"].research(query, step) - research_results.append({"step": step, "findings": step_research}) - # Log which sources were used for this step - source_indices = [initial_context.index(finding) + 1 for finding in step_research if finding in initial_context] - logger.info(f"Research for step: {step}\nUsing sources: {source_indices}") + try: + step_research = self.agents["researcher"].research(query, step) + # Extract findings from research result + findings = step_research.get("findings", []) if isinstance(step_research, dict) else [] + research_results.append({"step": 
step, "findings": findings}) + + # Log which sources were used for this step + try: + source_indices = [initial_context.index(finding) + 1 for finding in findings if finding in initial_context] + logger.info(f"Research for step: {step}\nUsing sources: {source_indices}") + except ValueError as ve: + logger.warning(f"Could not find some findings in initial context: {str(ve)}") + except Exception as e: + logger.error(f"Error during research for step '{step}': {str(e)}") + research_results.append({"step": step, "findings": []}) else: # If no researcher or no context, use the steps directly research_results = [{"step": step, "findings": []} for step in plan.split("\n") if step.strip()] @@ -130,13 +145,17 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: reasoning_steps = [] for result in research_results: - step_reasoning = self.agents["reasoner"].reason( - query, - result["step"], - result["findings"] if result["findings"] else [{"content": "Using general knowledge", "metadata": {"source": "General Knowledge"}}] - ) - reasoning_steps.append(step_reasoning) - logger.info(f"Reasoning for step: {result['step']}\n{step_reasoning}") + try: + step_reasoning = self.agents["reasoner"].reason( + query, + result["step"], + result["findings"] if result["findings"] else [{"content": "Using general knowledge", "metadata": {"source": "General Knowledge"}}] + ) + reasoning_steps.append(step_reasoning) + logger.info(f"Reasoning for step: {result['step']}\n{step_reasoning}") + except Exception as e: + logger.error(f"Error in reasoning for step '{result['step']}': {str(e)}") + reasoning_steps.append(f"Error in reasoning for this step: {str(e)}") # Step 4: Synthesize final answer logger.info("Step 4: Synthesis") @@ -144,8 +163,13 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: logger.warning("No synthesizer agent available, using direct response") return self._generate_general_response(query) - final_answer = self.agents["synthesizer"].synthesize(query, reasoning_steps) - logger.info(f"Final synthesized answer:\n{final_answer}") + try: + final_answer = self.agents["synthesizer"].synthesize(query, reasoning_steps) + logger.info(f"Final synthesized answer:\n{final_answer}") + except Exception as e: + logger.error(f"Error in synthesis step: {str(e)}") + logger.info("Falling back to general response") + return self._generate_general_response(query) return { "answer": final_answer, @@ -153,7 +177,7 @@ def _process_query_with_cot(self, query: str) -> Dict[str, Any]: "reasoning_steps": reasoning_steps } except Exception as e: - logger.error(f"Error in CoT processing: {str(e)}") + logger.error(f"Error in CoT processing: {str(e)}", exc_info=True) logger.info("Falling back to general response") return self._generate_general_response(query) From 61d0f24a4e1ff1479c6e0a4c3ea5394e9a630d1d Mon Sep 17 00:00:00 2001 From: jasperan <23caj23@gmail.com> Date: Tue, 25 Mar 2025 20:22:41 +0100 Subject: [PATCH 3/5] feat: added untaint nodes --- agentic_rag/articles/kubernetes_rag.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/agentic_rag/articles/kubernetes_rag.md b/agentic_rag/articles/kubernetes_rag.md index 48cd03d..f95c6d9 100644 --- a/agentic_rag/articles/kubernetes_rag.md +++ b/agentic_rag/articles/kubernetes_rag.md @@ -146,6 +146,13 @@ Then, we can start setting up the solution in our cluster by following these ste kubectl apply -n agentic-rag -f local-deployment/service.yaml ``` + If for some reason, after applying these, there's a `NoSchedule` policy being triggered, you can 
untaint the nodes and try again: + + ```bash + kubectl taint nodes -l node.kubernetes.io/instance-type=VM.GPU.A10.1 nvidia.com/gpu:NoSchedule- + # make sure to select your own instance shape if you're using a different type than A10 GPU. + ``` + 5. Monitor the Deployment With the following commands, we can check the status of our pod: From a7accdbd35ba55e2d49590cc57d3edfd21295929 Mon Sep 17 00:00:00 2001 From: jasperan <23caj23@gmail.com> Date: Tue, 25 Mar 2025 22:14:15 +0100 Subject: [PATCH 4/5] feat: web logic --- agentic_rag/gradio_app.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/agentic_rag/gradio_app.py b/agentic_rag/gradio_app.py index d38aae8..1aa74dd 100644 --- a/agentic_rag/gradio_app.py +++ b/agentic_rag/gradio_app.py @@ -365,10 +365,17 @@ def create_interface(): repo_button = gr.Button("Process Repository") repo_output = gr.Textbox(label="Repository Processing Output") + # Define collection choices once to ensure consistency + collection_choices = [ + "PDF Collection", + "Repository Collection", + "Web Knowledge Base", + "General Knowledge" + ] + with gr.Tab("Standard Chat Interface"): with gr.Row(): with gr.Column(scale=1): - # Create model choices with quantization options standard_agent_dropdown = gr.Dropdown( choices=model_choices, value=model_choices[0] if model_choices else None, @@ -376,8 +383,8 @@ def create_interface(): ) with gr.Column(scale=1): standard_collection_dropdown = gr.Dropdown( - choices=["PDF Collection", "Repository Collection", "Web Knowledge Base", "General Knowledge"], - value="PDF Collection", + choices=collection_choices, + value=collection_choices[0], label="Select Knowledge Base", info="Choose which knowledge base to use for answering questions" ) @@ -386,7 +393,7 @@ def create_interface(): > - This interface ALWAYS uses the selected collection without performing query analysis. > - "PDF Collection": Will ALWAYS search the PDF documents regardless of query type. > - "Repository Collection": Will ALWAYS search the repository code regardless of query type. - > - "Web Knowledge Base": Will ALWAYS search the web content regardless of query type. + > - "Web Knowledge Base": Will ALWAYS search web content regardless of query type. > - "General Knowledge": Will ALWAYS use the model's built-in knowledge without searching collections. """) standard_chatbot = gr.Chatbot(height=400) @@ -398,7 +405,6 @@ def create_interface(): with gr.Tab("Chain of Thought Chat Interface"): with gr.Row(): with gr.Column(scale=1): - # Create model choices with quantization options cot_agent_dropdown = gr.Dropdown( choices=model_choices, value=model_choices[0] if model_choices else None, @@ -406,8 +412,8 @@ def create_interface(): ) with gr.Column(scale=1): cot_collection_dropdown = gr.Dropdown( - choices=["PDF Collection", "Repository Collection", "Web Knowledge Base", "General Knowledge"], - value="PDF Collection", + choices=collection_choices, + value=collection_choices[0], label="Select Knowledge Base", info="Choose which knowledge base to use for answering questions" ) @@ -416,7 +422,7 @@ def create_interface(): > - When a specific collection is selected, the system will ALWAYS use that collection without analysis: > - "PDF Collection": Will ALWAYS search the PDF documents. > - "Repository Collection": Will ALWAYS search the repository code. - > - "Web Knowledge Base": Will ALWAYS search the web content. + > - "Web Knowledge Base": Will ALWAYS search web content. 
> - "General Knowledge": Will ALWAYS use the model's built-in knowledge. > - This interface shows step-by-step reasoning and may perform query analysis when needed. """) From b00aca45d0c73c870eb5a82dee82291395c88641 Mon Sep 17 00:00:00 2001 From: jasperan <23caj23@gmail.com> Date: Tue, 8 Apr 2025 20:56:49 +0200 Subject: [PATCH 5/5] feat: updated architecture --- agentic_rag/README.md | 4 ++-- agentic_rag/img/architecture.png | Bin 337326 -> 334313 bytes 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/agentic_rag/README.md b/agentic_rag/README.md index 4baa02d..c046acd 100644 --- a/agentic_rag/README.md +++ b/agentic_rag/README.md @@ -10,7 +10,7 @@ The system has the following features: - Intelligent query routing - PDF processing using Docling for accurate text extraction and chunking -- Persistent vector storage with ChromaDB (PDF and Websites) +- Persistent vector storage with ChromaDB and Oracle Database 23ai (PDF and Websites) - Smart context retrieval and response generation - FastAPI-based REST API for document upload and querying - Support for both OpenAI-based agents or local, transformer-based agents (`Mistral-7B` by default) @@ -358,7 +358,7 @@ The system consists of several key components: 1. **PDF Processor**: we use `docling` to extract and chunk text from PDF documents 2. **Web Processor**: we use `trafilatura` to extract and chunk text from websites 3. **GitHub Repository Processor**: we use `gitingest` to extract and chunk text from repositories -4. **Vector Store**: Manages document embeddings and similarity search using `ChromaDB` +4. **Vector Store**: Manages document embeddings and similarity search using `ChromaDB` and `Oracle Database 23ai` 5. **RAG Agent**: Makes intelligent decisions about query routing and response generation - OpenAI Agent: Uses `gpt-4-turbo-preview` for high-quality responses, but requires an OpenAI API key - Local Agent: Uses `Mistral-7B` as an open-source alternative diff --git a/agentic_rag/img/architecture.png b/agentic_rag/img/architecture.png index a7b1dcc468fbbb3795efb65807b29520c04db4c1..e22de006d3ddff167e7df2c66025c00ebd9b7a1a 100644 GIT binary patch literal 334313 zcmb?@cRZH+|34`jqErY`h!T;qS4t^D_8x`ok-a5l9HEk1b~tx7xwCiKZX@%yM^^T@ zv)Au+QRjTlIp05j9{1^p?)$o~_w{afFdf<^@uIbM|Hdvmpopd~V_u7>> z9{r0d$0>3sblLw%I;DE##(^`q9O*EF6#P2Xb5>b*(cb5#f*IOWC(hIa*)Ye|25A*& z3)q~F5qe>VzHTod;xx1M#naM)O7^kl_xWI(ym`^0V>1-z55FF4CiU4Y-F%#-#!iWK zk8vMFRaI5pIdSrzUmO+0>f#dr{2D!Tirnv?U&h_+N&fX=+2$Mp@P&E1|4$>rHmqWAa(js~azWb=N)I)Zq zi_)-%Ha2s#bHG0~ciO)<*ZzBFr$ujJj^p>7bezLNOJ{O)n4r4DNV{du^sD9vx$;=c zHp~p~tP-!2dr)sSyO4}fiDaTlnLV6hu3(~rEIw*H?^4;YjZTieC4N*FRv~`*+1^F* ze9F%^)~S8nW*Qx3B>d{w1xtKCinUiDG<++ zVt*DvED{Gi2n26CVQH&OBPL7u6oT$SnmT1xnG^a<0@rI5IG=>@G39fVXn0I~p z!kgL3&1=3EMpkK9(qQBzwD ztF0ef-ST+Du4-c%!Yta?EwM>sZk!fujeEb~M1EpX(uu{i728-CY)V;lAb)Q!%6T&z z-4mK>8=-1h%1#?hdGfS_E8I16UCS6n^-k4@%+6DP1${ctUB>5v93hmb>&@vp?$q|C z+%Gj)!xT-);O5IWOhZ=h(6pbzsMeIs~@uQ*AX-=1f={=XPOK zw3GEr*EKDNZo_=1MaG9L_n3%2=!~Bw*Dq~b>X@Kyo6_wbun0HRTrRLt=l(XG-9YJE zMBYztQ&4S-73u4tHYm9!*I74l)}k*rQ0p#wDISx-!NGxQz8h`SE>IE_Cb*o56SP;y zl%C{Z>;7u?CaYg{T7Df**%*L8rAOB7H`X}QkT0o+_lSQk5A!8U01(O zPrH~*pc`#!$F5GeZ(P*1y_h`VFFRFm)zUPFY-}KrYOFJ%ulPyJkNG&%v?|t6xdJO% zpfFvifV~qKrX66I{?gFNJtNHhs3?jg)?$0<^NdEWcGE6DthA*qpCxuV0lBYT4;@6V zh?KoW9i5=)NO9PXdZ%fWh`ZinsdkiK>3gnuofAjmiX}d(vpKVh8}}q?T)^F^eVHvU z17nGatt6Ia@0(}Gq(no+zGM4@|GE8egq&9+A;wb3baw8=zQ?RemVI!|lvO>d9HsVXe5 zvAUGTGwB0<*9Q6d6ei9#m(5_X4Gyy8iK8UTJRO3|0;VOfK`>deQyaC0)NOZZ^B|Tu zi)OfM959eeo?|)7`qFv}Y*{l3r`T+ZIB^&X@>_AQ)Bc(77~@&+CG+m&v1^sA205Ii 