diff --git a/__pycache__/main.cpython-313.pyc b/__pycache__/main.cpython-313.pyc deleted file mode 100644 index b2d4c74f..00000000 Binary files a/__pycache__/main.cpython-313.pyc and /dev/null differ diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 00000000..81f9bbde --- /dev/null +++ b/alembic.ini @@ -0,0 +1,116 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +script_location = backend/app/db/migrations + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python-dateutil library that can be +# installed by adding `alembic[tz]` to the pip requirements +# string value is passed to dateutil.tz.gettz() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the +# "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to backend/app/db/migrations/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" below. 
+# version_locations = %(here)s/bar:%(here)s/bat:backend/app/db/migrations/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. +# Valid values for version_path_separator are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +version_path_separator = os # Use os.pathsep. Default configuration used for new projects. + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +sqlalchemy.url = sqlite+aiosqlite:///./sql_app.db + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the exec runner, execute a binary +# hooks = ruff +# ruff.type = exec +# ruff.executable = %(here)s/.venv/bin/ruff +# ruff.options = --fix REVISION_SCRIPT_FILENAME + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/backend/__pycache__/main.cpython-313.pyc b/backend/__pycache__/main.cpython-313.pyc deleted file mode 100644 index ec791129..00000000 Binary files a/backend/__pycache__/main.cpython-313.pyc and /dev/null differ diff --git a/backend/app/api/v1/__pycache__/__init__.cpython-313.pyc b/backend/app/api/v1/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 0211d1f8..00000000 Binary files a/backend/app/api/v1/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/backend/app/api/v1/__pycache__/routes.cpython-313.pyc b/backend/app/api/v1/__pycache__/routes.cpython-313.pyc deleted file mode 100644 index 487d259d..00000000 Binary files a/backend/app/api/v1/__pycache__/routes.cpython-313.pyc and /dev/null differ diff --git a/backend/app/api/v1/routes.py b/backend/app/api/v1/routes.py index a8d22310..6644e472 100644 --- a/backend/app/api/v1/routes.py +++ b/backend/app/api/v1/routes.py @@ -1,87 +1,78 @@ -from fastapi import 
APIRouter, BackgroundTasks +from fastapi import APIRouter, BackgroundTasks, Depends from fastapi.responses import JSONResponse +from sqlalchemy.ext.asyncio import AsyncSession +from backend.app.db.database import get_db +from backend.app.db.repositories.report_repository import ReportRepository from backend.app.models.report_models import ReportRequest, ReportResponse -from backend.app.services.report_service import generate_report, in_memory_reports, get_report_status_from_memory, get_report_data -from backend.app.core.orchestrator import create_orchestrator +from backend.app.services.report_service import generate_report, get_report_status, get_report_data +from backend.app.services.report_processor import process_report from backend.app.core.logger import api_logger from backend.app.core.exceptions import ReportNotFoundException -import asyncio +from backend.app.db.models.report_state import ReportStatusEnum router = APIRouter() -# Dummy Agent for demonstration -async def dummy_agent_one(report_id: str, token_id: str) -> dict: - print(f"Dummy Agent One running for report {report_id} and token {token_id}") - await asyncio.sleep(2) # Simulate async work - return {"agent_one_data": "data_from_agent_one"} - -async def dummy_agent_two(report_id: str, token_id: str) -> dict: - print(f"Dummy Agent Two running for report {report_id} and token {token_id}") - await asyncio.sleep(1.5) # Simulate async work - return {"agent_two_data": "data_from_agent_two"} - -# Register agents -orchestrator_instance = create_orchestrator() -orchestrator_instance.register_agent("AgentOne", dummy_agent_one) -orchestrator_instance.register_agent("AgentTwo", dummy_agent_two) - @router.get("/") async def read_root(): return {"message": "Welcome to API v1"} async def _run_agents_in_background(report_id: str, token_id: str): - try: - await orchestrator_instance.execute_agents_concurrently(report_id, token_id) - except Exception as e: - api_logger.error(f"Agent execution failed for report 
{report_id}: {e}") - # Here you might want to update the report status to 'failed' in in_memory_reports - # For now, we'll just log it. - if report_id in in_memory_reports: - in_memory_reports[report_id]["status"] = "failed" - in_memory_reports[report_id]["detail"] = f"Agent execution failed: {e}" + async for session in get_db(): + report_repository = ReportRepository(session) + try: + await report_repository.update_report_status(report_id, ReportStatusEnum.RUNNING_AGENTS) + await process_report(report_id, token_id, report_repository) + await report_repository.update_report_status(report_id, ReportStatusEnum.COMPLETED) + break # Exit the async for loop after successful processing + except Exception as e: + api_logger.error(f"Report processing failed for report {report_id}: {e}") + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.FAILED, "error_message": str(e)}) + break # Exit the async for loop on failure @router.post("/report/generate", response_model=ReportResponse) -async def generate_report_endpoint(request: ReportRequest, background_tasks: BackgroundTasks): +async def generate_report_endpoint(request: ReportRequest, background_tasks: BackgroundTasks, session: AsyncSession = Depends(get_session)): api_logger.info(f"Received report generation request for token_id: {request.token_id}") - report_response = await generate_report(request) + report_repository = ReportRepository(session) + report_response = await generate_report(request, report_repository) report_id = report_response.report_id background_tasks.add_task(_run_agents_in_background, report_id, request.token_id) return report_response @router.get("/reports/{report_id}/status") -async def get_report_status(report_id: str): +async def get_report_status_endpoint(report_id: str, session: AsyncSession = Depends(get_session)): api_logger.info(f"Received status request for report_id: {report_id}") - report = get_report_status_from_memory(report_id) + report_repository = 
ReportRepository(session) +    report = await get_report_status(report_id, report_repository)     if not report:         api_logger.error(f"Report with id {report_id} not found for status request.")         raise ReportNotFoundException(detail="Report not found")     return {"report_id": report_id, "status": report["status"]}  @router.get("/reports/{report_id}/data") -async def get_report_data_endpoint(report_id: str): +async def get_report_data_endpoint(report_id: str, session: AsyncSession = Depends(get_db)):     api_logger.info(f"Received data request for report_id: {report_id}") -    report_result = get_report_data(report_id) +    report_repository = ReportRepository(session) +    report_result = await get_report_data(report_id, report_repository)     if report_result: -        if "data" in report_result: +        if report_result.get("status") == ReportStatusEnum.COMPLETED.value:             api_logger.info(f"Returning data for report_id: {report_id}")             return report_result -        elif report_result.get("status") == "processing": +        elif report_result.get("status") == ReportStatusEnum.RUNNING_AGENTS.value or report_result.get("status") == ReportStatusEnum.PENDING.value or report_result.get("status") == ReportStatusEnum.GENERATING_NLG.value or report_result.get("status") == ReportStatusEnum.GENERATING_SUMMARY.value:             api_logger.warning(f"Report {report_id} is still processing.") -            # Match test expectations exactly             return JSONResponse(                 status_code=202,                 content={                     "detail": "Report is still processing.",                 },             ) -        elif report_result.get("status") == "failed": -            api_logger.error(f"Report {report_id} failed with detail: {report_result.get("detail", "N/A")}") +        elif report_result.get("status") == ReportStatusEnum.FAILED.value: +            api_logger.error(f"Report {report_id} failed with detail: {report_result.get('detail', 'N/A')}")             return JSONResponse(                 status_code=409,                 content={                     "report_id": report_id,                     "message": "Report failed", -                    "detail": report_result.get("detail", "Report 
processing failed."), + "detail": report_result.get('detail', 'Report processing failed.'), }, ) api_logger.error(f"Report with id {report_id} not found or not completed for data request.") diff --git a/backend/app/core/__pycache__/config.cpython-313.pyc b/backend/app/core/__pycache__/config.cpython-313.pyc index b7326cc9..b1120bb1 100644 Binary files a/backend/app/core/__pycache__/config.cpython-313.pyc and b/backend/app/core/__pycache__/config.cpython-313.pyc differ diff --git a/backend/app/core/__pycache__/exceptions.cpython-313.pyc b/backend/app/core/__pycache__/exceptions.cpython-313.pyc deleted file mode 100644 index 2585b123..00000000 Binary files a/backend/app/core/__pycache__/exceptions.cpython-313.pyc and /dev/null differ diff --git a/backend/app/core/__pycache__/orchestrator.cpython-313.pyc b/backend/app/core/__pycache__/orchestrator.cpython-313.pyc index b146151a..b63516dd 100644 Binary files a/backend/app/core/__pycache__/orchestrator.cpython-313.pyc and b/backend/app/core/__pycache__/orchestrator.cpython-313.pyc differ diff --git a/backend/app/core/__pycache__/storage.cpython-313.pyc b/backend/app/core/__pycache__/storage.cpython-313.pyc deleted file mode 100644 index 82d67ad5..00000000 Binary files a/backend/app/core/__pycache__/storage.cpython-313.pyc and /dev/null differ diff --git a/backend/app/core/orchestrator.py b/backend/app/core/orchestrator.py index 2fdda67c..61e33f8c 100644 --- a/backend/app/core/orchestrator.py +++ b/backend/app/core/orchestrator.py @@ -1,15 +1,16 @@ import asyncio from typing import Callable, Dict, Any from urllib.parse import urlparse -from backend.app.services.report_service import in_memory_reports from backend.app.core.logger import orchestrator_logger from backend.app.services.agents.onchain_agent import fetch_onchain_metrics, fetch_tokenomics from backend.app.services.agents.social_sentiment_agent import SocialSentimentAgent from backend.app.services.agents.team_doc_agent import TeamDocAgent from 
backend.app.services.agents.code_audit_agent import CodeAuditAgent # Import CodeAuditAgent from backend.app.core.config import settings -from backend.app.services.summary import ReportSummaryEngine -from backend.app.services.nlg.report_nlg_engine import ReportNLGEngine +from backend.app.db.repositories.report_repository import ReportRepository +from backend.app.db.models.report_state import ReportStatusEnum +from sqlalchemy.ext.asyncio import AsyncSession +from backend.app.db.database import get_db, AsyncSessionLocal # Added for agent functions async def dummy_agent(report_id: str, token_id: str) -> Dict[str, Any]: """ @@ -19,152 +20,53 @@ async def dummy_agent(report_id: str, token_id: str) -> Dict[str, Any]: await asyncio.sleep(1) # Simulate some async work return {"dummy_data": f"Processed by dummy agent for {report_id}"} -class AIOrchestrator: +class Orchestrator: """ - Base class for coordinating multiple AI agents. - Designed to handle parallel asynchronous agent calls. + Concrete implementation of AIOrchestrator. + Instances of Orchestrator should be created using the `create_orchestrator` factory function. """ - - def __init__(self): + def __init__(self, session_factory: Callable[..., AsyncSession]): self._agents: Dict[str, Callable] = {} + self.report_repository = ReportRepository(session_factory) def register_agent(self, name: str, agent_func: Callable): - orchestrator_logger.info(f"Registering agent: {name}") - """ - Registers an AI agent with the orchestrator. - Args: - name (str): The name of the agent. - agent_func (Callable): The asynchronous function representing the agent. - """ self._agents[name] = agent_func - def get_agents(self) -> Dict[str, Callable]: - """ - Returns the dictionary of registered AI agents. - Returns: - Dict[str, Callable]: A dictionary where keys are agent names and values are agent functions. 
- """ - return self._agents.copy() - async def execute_agents(self, report_id: str, token_id: str) -> Dict[str, Any]: - orchestrator_logger.info(f"Executing agents for report_id: {report_id}, token_id: {token_id}") - tasks = {name: asyncio.create_task(agent_func(report_id, token_id)) for name, agent_func in self._agents.items()} - results = {} - - for name, task in tasks.items(): - try: - result = await asyncio.wait_for(task, timeout=settings.AGENT_TIMEOUT) # Added timeout - orchestrator_logger.debug(f"Agent {name} task result: {result}") - # Check if the agent itself returned a status - if isinstance(result, dict) and "status" in result: - results[name] = result - else: - results[name] = {"status": "completed", "data": result} - orchestrator_logger.info(f"Agent {name} completed for report {report_id}.") - except asyncio.TimeoutError: # Handle timeout specifically - orchestrator_logger.exception("Agent %s timed out for report %s", name, report_id) - results[name] = {"status": "failed", "error": "Agent timed out"} - except Exception as e: - orchestrator_logger.exception("Agent %s failed for report %s", name, report_id) - results[name] = {"status": "failed", "error": str(e)} - return results - - def aggregate_results(self, results: Dict[str, Any]) -> Dict[str, Any]: - orchestrator_logger.info("Aggregating results from executed agents.") - """ - Aggregates the results from the executed AI agents. - Args: - results (dict): A dictionary of results from the executed agents. - Returns: - The aggregated result. - """ - aggregated_data = {} - for agent_name, agent_result in results.items(): - if agent_result["status"] == "completed" and "data" in agent_result: - aggregated_data.update(agent_result["data"]) - return aggregated_data - -class Orchestrator(AIOrchestrator): - """ - Concrete implementation of AIOrchestrator. - Instances of Orchestrator should be created using the `create_orchestrator` factory function. 
- """ - async def execute_agents_concurrently(self, report_id: str, token_id: str) -> Dict[str, Any]: - orchestrator_logger.info(f"Executing agents concurrently for report_id: {report_id}, token_id: {token_id}") - agent_results = await self.execute_agents(report_id, token_id) - aggregated_data = self.aggregate_results(agent_results) - - # Determine overall status - overall_status = "completed" - if any(result["status"] == "failed" for result in agent_results.values()): - overall_status = "failed" - orchestrator_logger.error(f"Report {report_id} failed due to one or more agent failures.") - elif any(result["status"] == "partial_success" for result in agent_results.values()): - overall_status = "partial_success" - orchestrator_logger.warning(f"Report {report_id} completed with partial success due to agent failures.") - - # Initialize SummaryEngine and NLGEngine - summary_engine = ReportSummaryEngine() - nlg_engine = ReportNLGEngine() # Assuming NLGEngine is initialized here - - # Generate NLG outputs (placeholder for actual NLG generation) - # In a real scenario, nlg_engine would process aggregated_data to produce text - nlg_sections = await nlg_engine.generate_nlg_outputs(aggregated_data) # Assuming this method exists - - # Prepare score_input for the summary engine - score_input = { - "tokenomics_data": aggregated_data.get("tokenomics", {}), - "sentiment_data": aggregated_data.get("social_sentiment", {}), - "code_audit_data": aggregated_data.get("code_audit", {}), - "team_data": aggregated_data.get("team_documentation", {}), + tasks = { + name: agent_func(report_id, token_id) + for name, agent_func in self._agents.items() } - - # Generate scores - scores = summary_engine.generate_scores(score_input) - - # Build final summary - final_summary = summary_engine.build_final_summary(nlg_sections, scores) - - # Update in_memory_reports - if report_id in in_memory_reports: - in_memory_reports[report_id].update({ - "status": overall_status, - "data": aggregated_data, - 
"scores": scores, # Add scores to the report - "nlg_sections": nlg_sections, # Add NLG generated sections to the report - "summary": final_summary # Add final summary to the report - }) - orchestrator_logger.info(f"Report {report_id} status updated to {overall_status}.") - else: - orchestrator_logger.warning("Report ID %s not found in in_memory_reports during orchestration.", report_id) - - return aggregated_data - -def create_orchestrator(register_dummy: bool = False) -> Orchestrator: + results = await asyncio.gather(*tasks.values(), return_exceptions=True) + return dict(zip(tasks.keys(), results)) + + def aggregate_results(self, agent_results: Dict[str, Any]) -> Dict[str, Any]: + combined_data = {} + for agent_name, result in agent_results.items(): + if isinstance(result, dict) and result.get("status") == "completed": + combined_data.update(result.get("data", {})) + else: + orchestrator_logger.error(f"Agent {agent_name} failed or returned unexpected result: {result}") + return combined_data + + +def _is_valid_url(url: str | None, url_name: str) -> bool: + if not url: + orchestrator_logger.warning(f"Configuration Error: {url_name} is missing. Skipping agent registration.") + return False + parsed_url = urlparse(url) + if not parsed_url.scheme or not parsed_url.netloc or parsed_url.scheme not in ("http", "https"): + orchestrator_logger.warning( + f"Configuration Error: {url_name} ('{url}') is not a valid HTTP/HTTPS URL. Skipping agent registration." + ) + return False + return True + +async def create_orchestrator() -> Orchestrator: """ Factory function to create and configure an Orchestrator instance. - - Args: - register_dummy (bool): If True, a 'dummy_agent' will be registered with the orchestrator. - - Returns: - Orchestrator: A new instance of the Orchestrator. """ - def _is_valid_url(url: str | None, url_name: str) -> bool: - if not url: - orchestrator_logger.warning(f"Configuration Error: {url_name} is missing. 
Skipping agent registration.") - return False - parsed_url = urlparse(url) - if not parsed_url.scheme or not parsed_url.netloc or parsed_url.scheme not in ("http", "https"): - orchestrator_logger.warning( - f"Configuration Error: {url_name} ('{url}') is not a valid HTTP/HTTPS URL. Skipping agent registration." - ) - return False - return True - - orch = Orchestrator() - if register_dummy: - orch.register_agent('dummy_agent', dummy_agent) + orch = Orchestrator(AsyncSessionLocal) # Configure and register Onchain Data Agent onchain_metrics_url = settings.ONCHAIN_METRICS_URL @@ -173,47 +75,66 @@ def _is_valid_url(url: str | None, url_name: str) -> bool: if _is_valid_url(onchain_metrics_url, "ONCHAIN_METRICS_URL") and _is_valid_url(tokenomics_url, "TOKENOMICS_URL"): async def onchain_data_agent(report_id: str, token_id: str) -> Dict[str, Any]: orchestrator_logger.info(f"Calling Onchain Data Agent for report_id: {report_id}, token_id: {token_id}") - onchain_metrics_params = {"token_id": token_id, "report_id": report_id} - tokenomics_params = {"token_id": token_id} - - onchain_metrics_task = asyncio.create_task(fetch_onchain_metrics(url=onchain_metrics_url, params=onchain_metrics_params, token_id=token_id)) - tokenomics_task = asyncio.create_task(fetch_tokenomics(url=tokenomics_url, params=tokenomics_params, token_id=token_id)) - - onchain_metrics_result = {} - tokenomics_result = {} - - onchain_metrics_result, tokenomics_result = await asyncio.gather( - asyncio.wait_for(onchain_metrics_task, timeout=settings.AGENT_TIMEOUT - 1), - asyncio.wait_for(tokenomics_task, timeout=settings.AGENT_TIMEOUT - 1), - return_exceptions=True # This will allow us to handle exceptions for each task individually - ) - - # Handle individual task results and exceptions - if isinstance(onchain_metrics_result, asyncio.TimeoutError): - orchestrator_logger.error("Onchain metrics fetch timed out for report %s", report_id) - onchain_metrics_result = {"status": "failed", "error": "Onchain metrics 
fetch timed out"} - elif isinstance(onchain_metrics_result, Exception): - orchestrator_logger.error("Onchain metrics fetch failed for report %s", report_id) - onchain_metrics_result = {"status": "failed", "error": str(onchain_metrics_result)} - - if isinstance(tokenomics_result, asyncio.TimeoutError): - orchestrator_logger.error("Tokenomics fetch timed out for report %s", report_id) - tokenomics_result = {"status": "failed", "error": "Tokenomics fetch timed out"} - elif isinstance(tokenomics_result, Exception): - orchestrator_logger.error("Tokenomics fetch failed for report %s", report_id) - tokenomics_result = {"status": "failed", "error": str(tokenomics_result)} - - overall_agent_status = "completed" - if onchain_metrics_result.get("status") == "failed" or tokenomics_result.get("status") == "failed": - overall_agent_status = "failed" + async with get_db() as session: # New session for agent + report_repository = ReportRepository(session) + await report_repository.update_report_status(report_id, ReportStatusEnum.RUNNING_AGENTS) + onchain_metrics_params = {"token_id": token_id, "report_id": report_id} + tokenomics_params = {"token_id": token_id} + + onchain_metrics_task = asyncio.create_task(fetch_onchain_metrics(url=onchain_metrics_url, params=onchain_metrics_params, token_id=token_id)) + tokenomics_task = asyncio.create_task(fetch_tokenomics(url=tokenomics_url, params=tokenomics_params, token_id=token_id)) + + onchain_metrics_result = {} + tokenomics_result = {} + + try: + onchain_metrics_result, tokenomics_result = await asyncio.gather( + asyncio.wait_for(onchain_metrics_task, timeout=settings.AGENT_TIMEOUT - 1), + asyncio.wait_for(tokenomics_task, timeout=settings.AGENT_TIMEOUT - 1), + return_exceptions=True # This will allow us to handle exceptions for each task individually + ) - return { - "status": overall_agent_status, - "data": { - "onchain_metrics": onchain_metrics_result, - "tokenomics": tokenomics_result - } - } + # Handle individual task results and 
exceptions + if isinstance(onchain_metrics_result, asyncio.TimeoutError): + orchestrator_logger.error("Onchain metrics fetch timed out for report %s", report_id) + onchain_metrics_result = {"status": "failed", "error": "Onchain metrics fetch timed out"} + elif isinstance(onchain_metrics_result, Exception): + orchestrator_logger.error("Onchain metrics fetch failed for report %s", report_id) + onchain_metrics_result = {"status": "failed", "error": str(onchain_metrics_result)} + + if isinstance(tokenomics_result, asyncio.TimeoutError): + orchestrator_logger.error("Tokenomics fetch timed out for report %s", report_id) + tokenomics_result = {"status": "failed", "error": "Tokenomics fetch timed out"} + elif isinstance(tokenomics_result, Exception): + orchestrator_logger.error("Tokenomics fetch failed for report %s", report_id) + tokenomics_result = {"status": "failed", "error": str(tokenomics_result)} + + overall_agent_status = "completed" + if onchain_metrics_result.get("status") == "failed" or tokenomics_result.get("status") == "failed": + overall_agent_status = "failed" + + result = { + "status": overall_agent_status, + "data": { + "onchain_metrics": onchain_metrics_result, + "tokenomics": tokenomics_result + } + } + existing_report = await report_repository.get_report_by_id(report_id) + existing_partial_agent_output = existing_report.partial_agent_output if existing_report else {} + if overall_agent_status == "completed": + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_COMPLETED, "partial_agent_output": {**existing_partial_agent_output, "onchain_data_agent": result}}) + else: + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": "Onchain Data Agent failed", "partial_agent_output": {**existing_partial_agent_output, "onchain_data_agent": result}}) + return result + except asyncio.TimeoutError as e: + orchestrator_logger.error("Onchain Data Agent timed out for report %s", 
report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": "Agent timed out"} + except Exception as e: + orchestrator_logger.exception("Onchain Data Agent failed for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": str(e)} orch.register_agent('onchain_data_agent', onchain_data_agent) else: orchestrator_logger.warning("Onchain Data Agent will not be registered due to invalid configuration.") @@ -221,86 +142,95 @@ async def onchain_data_agent(report_id: str, token_id: str) -> Dict[str, Any]: # Configure and register Social Sentiment Agent async def social_sentiment_agent_func(report_id: str, token_id: str) -> Dict[str, Any]: orchestrator_logger.info(f"Calling Social Sentiment Agent for report_id: {report_id}, token_id: {token_id}") - agent = SocialSentimentAgent() - social_sentiment_data = {} - try: - social_data = await asyncio.wait_for(agent.fetch_social_data(token_id), timeout=settings.AGENT_TIMEOUT - 1) - sentiment_report = await asyncio.wait_for(agent.analyze_sentiment(social_data), timeout=settings.AGENT_TIMEOUT - 1) - social_sentiment_data = { - "social_sentiment": { - "overall_sentiment": sentiment_report.get("overall_sentiment"), - "score": sentiment_report.get("score"), - "summary": sentiment_report.get("details") # Storing details as summary for now - } - } - orchestrator_logger.info(f"Social Sentiment Agent completed for report {report_id}.") - except asyncio.TimeoutError: - orchestrator_logger.error("Social Sentiment Agent timed out for report %s", report_id) - return {"status": "failed", "error": "Agent timed out"} - except Exception as e: - orchestrator_logger.exception("Social Sentiment Agent failed for report %s", report_id) - return {"status": "failed", "error": str(e)} - return { - "status": "completed", - 
"data": { - "social_sentiment": { - "overall_sentiment": sentiment_report.get("overall_sentiment"), - "score": sentiment_report.get("score"), - "summary": sentiment_report.get("details") # Storing details as summary for now + async with get_db() as session: # New session for agent + report_repository = ReportRepository(session) + await report_repository.update_report_status(report_id, ReportStatusEnum.RUNNING_AGENTS) + agent = SocialSentimentAgent() + try: + social_data = await asyncio.wait_for(agent.fetch_social_data(token_id), timeout=settings.AGENT_TIMEOUT - 1) + sentiment_report = await asyncio.wait_for(agent.analyze_sentiment(social_data), timeout=settings.AGENT_TIMEOUT - 1) + orchestrator_logger.info(f"Social Sentiment Agent completed for report {report_id}.") + result = { + "status": "completed", + "data": { + "social_sentiment": { + "overall_sentiment": sentiment_report.get("overall_sentiment"), + "score": sentiment_report.get("score"), + "summary": sentiment_report.get("details") # Storing details as summary for now + } + } } - } - } + existing_report = await report_repository.get_report_by_id(report_id) + existing_partial_agent_output = existing_report.partial_agent_output if existing_report else {} + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_COMPLETED, "partial_agent_output": {**existing_partial_agent_output, "social_sentiment_agent": result}}) + return result + except asyncio.TimeoutError as e: + orchestrator_logger.error("Social Sentiment Agent timed out for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": "Agent timed out"} + except Exception as e: + orchestrator_logger.exception("Social Sentiment Agent failed for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": 
"failed", "error": str(e)} orch.register_agent('social_sentiment_agent', social_sentiment_agent_func) # Configure and register Team and Documentation Agent async def team_documentation_agent(report_id: str, token_id: str) -> Dict[str, Any]: orchestrator_logger.info(f"Calling Team and Documentation Agent for report_id: {report_id}, token_id: {token_id}") - agent = TeamDocAgent() - team_analysis = [] - whitepaper_summary = {} - - # Placeholder for fetching token-related data (URLs, whitepaper text) - # In a real scenario, this data would be fetched based on token_id - # For now, we'll use dummy data or assume it comes from settings - team_profile_urls = settings.TEAM_PROFILE_URLS.get(token_id, []) - whitepaper_text_source = settings.WHITEPAPER_TEXT_SOURCES.get(token_id, "") - - try: - # Scrape team profiles - orchestrator_logger.info(f"Scraping team profiles for token {token_id} from URLs: {team_profile_urls}") - team_analysis = await asyncio.wait_for( - asyncio.to_thread(agent.scrape_team_profiles, team_profile_urls), - timeout=settings.AGENT_TIMEOUT - 1 - ) - orchestrator_logger.info(f"Team profile scraping completed for token {token_id}.") + async with get_db() as session: # New session for agent + report_repository = ReportRepository(session) + await report_repository.update_report_status(report_id, ReportStatusEnum.RUNNING_AGENTS) + agent = TeamDocAgent() + team_analysis = [] + whitepaper_summary = {} + + # Placeholder for fetching token-related data (URLs, whitepaper text) + # In a real scenario, this data would be fetched based on token_id + # For now, we'll use dummy data or assume it comes from settings + team_profile_urls = settings.TEAM_PROFILE_URLS.get(token_id, []) + whitepaper_text_source = settings.WHITEPAPER_TEXT_SOURCES.get(token_id, "") - # Analyze whitepaper - if whitepaper_text_source: - orchestrator_logger.info(f"Analyzing whitepaper for token {token_id} from source: {whitepaper_text_source}") - whitepaper_summary = await asyncio.wait_for( - 
asyncio.to_thread(agent.analyze_whitepaper, whitepaper_text_source), + try: + # Scrape team profiles + orchestrator_logger.info(f"Scraping team profiles for token {token_id} from URLs: {team_profile_urls}") + team_analysis = await asyncio.wait_for( + asyncio.to_thread(agent.scrape_team_profiles, team_profile_urls), timeout=settings.AGENT_TIMEOUT - 1 ) - orchestrator_logger.info(f"Whitepaper analysis completed for token {token_id}.") - else: - orchestrator_logger.warning(f"No whitepaper text source provided for token {token_id}. Skipping whitepaper analysis.") + orchestrator_logger.info(f"Team profile scraping completed for token {token_id}.") - except asyncio.TimeoutError: - orchestrator_logger.error("Team and Documentation Agent timed out for report %s", report_id) - return {"status": "failed", "error": "Agent timed out"} - except Exception as e: - orchestrator_logger.exception("Team and Documentation Agent failed for report %s", report_id) - return {"status": "failed", "error": str(e)} - - return { - "status": "completed", - "data": { - "team_documentation": { - "team_analysis": team_analysis, - "whitepaper_summary": whitepaper_summary + # Analyze whitepaper + if whitepaper_text_source: + orchestrator_logger.info(f"Analyzing whitepaper for token {token_id} from source: {whitepaper_text_source}") + whitepaper_summary = await asyncio.wait_for( + asyncio.to_thread(agent.analyze_whitepaper, whitepaper_text_source), + timeout=settings.AGENT_TIMEOUT - 1 + ) + orchestrator_logger.info(f"Whitepaper analysis completed for token {token_id}.") + else: + orchestrator_logger.warning(f"No whitepaper text source provided for token {token_id}. 
Skipping whitepaper analysis.") + + result = { + "status": "completed", + "data": { + "team_documentation": { + "team_analysis": team_analysis, + "whitepaper_summary": whitepaper_summary + } + } } - } - } + existing_report = await report_repository.get_report_by_id(report_id) + existing_partial_agent_output = existing_report.partial_agent_output if existing_report else {} + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_COMPLETED, "partial_agent_output": {**existing_partial_agent_output, "team_documentation_agent": result}}) + return result + except asyncio.TimeoutError as e: + orchestrator_logger.error("Team and Documentation Agent timed out for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": "Agent timed out"} + except Exception as e: + orchestrator_logger.exception("Team and Documentation Agent failed for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": str(e)} orch.register_agent('team_documentation_agent', team_documentation_agent) # Configure and register Code/Audit Agent @@ -308,53 +238,60 @@ async def team_documentation_agent(report_id: str, token_id: str) -> Dict[str, A if _is_valid_url(code_audit_repo_url, "CODE_AUDIT_REPO_URL"): async def code_audit_agent_func(report_id: str, token_id: str) -> Dict[str, Any]: orchestrator_logger.info(f"Calling Code/Audit Agent for report_id: {report_id}, token_id: {token_id}") - code_metrics_data = {} - audit_summary_data = [] - try: - async with CodeAuditAgent() as agent: - # Fetch repo metrics - orchestrator_logger.info(f"Fetching repository metrics for {code_audit_repo_url}") - code_metrics = await asyncio.wait_for( - agent.fetch_repo_metrics(code_audit_repo_url), - timeout=settings.AGENT_TIMEOUT - 1 - ) - 
code_metrics_data = code_metrics.model_dump() - - # Analyze code activity - orchestrator_logger.info(f"Analyzing code activity for {code_audit_repo_url}") - code_activity_analysis = await asyncio.wait_for( - agent.analyze_code_activity(code_metrics), - timeout=settings.AGENT_TIMEOUT - 1 - ) - code_metrics_data.update({"activity_analysis": code_activity_analysis}) - - # Search and summarize audit reports - orchestrator_logger.info(f"Searching and summarizing audit reports for {code_audit_repo_url}") - audit_summary = await asyncio.wait_for( - agent.search_and_summarize_audit_reports(code_audit_repo_url), - timeout=settings.AGENT_TIMEOUT - 1 - ) - audit_summary_data = audit_summary - - except asyncio.TimeoutError: - orchestrator_logger.error("Code/Audit Agent timed out for report %s", report_id) - return {"status": "failed", "error": "Agent timed out"} - except Exception as e: - orchestrator_logger.exception("Code/Audit Agent failed for report %s", report_id) - return {"status": "failed", "error": str(e)} - - return { - "status": "completed", - "data": { - "code_audit": { - "code_metrics": code_metrics_data, - "audit_summary": audit_summary_data + async with get_db() as session: # New session for agent + report_repository = ReportRepository(session) + await report_repository.update_report_status(report_id, ReportStatusEnum.RUNNING_AGENTS) + code_metrics_data = {} + audit_summary_data = [] + try: + async with CodeAuditAgent() as agent: + # Fetch repo metrics + orchestrator_logger.info(f"Fetching repository metrics for {code_audit_repo_url}") + code_metrics = await asyncio.wait_for( + agent.fetch_repo_metrics(code_audit_repo_url), + timeout=settings.AGENT_TIMEOUT - 1 + ) + code_metrics_data = code_metrics.model_dump() + + # Analyze code activity + orchestrator_logger.info(f"Analyzing code activity for {code_audit_repo_url}") + code_activity_analysis = await asyncio.wait_for( + agent.analyze_code_activity(code_metrics), + timeout=settings.AGENT_TIMEOUT - 1 + ) + 
code_metrics_data.update({"activity_analysis": code_activity_analysis}) + + # Search and summarize audit reports + orchestrator_logger.info(f"Searching and summarizing audit reports for {code_audit_repo_url}") + audit_summary = await asyncio.wait_for( + agent.search_and_summarize_audit_reports(code_audit_repo_url), + timeout=settings.AGENT_TIMEOUT - 1 + ) + audit_summary_data = audit_summary + + result = { + "status": "completed", + "data": { + "code_audit": { + "code_metrics": code_metrics_data, + "audit_summary": audit_summary_data + } + } } - } - } + existing_report = await report_repository.get_report_by_id(report_id) + existing_partial_agent_output = existing_report.partial_agent_output if existing_report else {} + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_COMPLETED, "partial_agent_output": {**existing_partial_agent_output, "code_audit_agent": result}}) + return result + except asyncio.TimeoutError as e: + orchestrator_logger.error("Code/Audit Agent timed out for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": "Agent timed out"} + except Exception as e: + orchestrator_logger.exception("Code/Audit Agent failed for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.AGENTS_FAILED, "error_message": str(e)}) + return {"status": "failed", "error": str(e)} orch.register_agent('code_audit_agent', code_audit_agent_func) else: orchestrator_logger.warning("Code/Audit Agent will not be registered due to invalid CODE_AUDIT_REPO_URL configuration.") - - return orch \ No newline at end of file + return orch diff --git a/backend/app/db/__pycache__/base.cpython-313.pyc b/backend/app/db/__pycache__/base.cpython-313.pyc new file mode 100644 index 00000000..5dbdce70 Binary files /dev/null and b/backend/app/db/__pycache__/base.cpython-313.pyc differ diff 
--git a/backend/app/db/migrations/README b/backend/app/db/migrations/README new file mode 100644 index 00000000..98e4f9c4 --- /dev/null +++ b/backend/app/db/migrations/README @@ -0,0 +1 @@ +Generic single-database configuration. \ No newline at end of file diff --git a/backend/app/db/migrations/__pycache__/env.cpython-313.pyc b/backend/app/db/migrations/__pycache__/env.cpython-313.pyc new file mode 100644 index 00000000..0d5954f1 Binary files /dev/null and b/backend/app/db/migrations/__pycache__/env.cpython-313.pyc differ diff --git a/backend/app/db/migrations/env.py b/backend/app/db/migrations/env.py new file mode 100644 index 00000000..27ed9773 --- /dev/null +++ b/backend/app/db/migrations/env.py @@ -0,0 +1,97 @@ +from logging.config import fileConfig + +from sqlalchemy import engine_from_config +from sqlalchemy import pool + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +import os, sys +sys.path.append(os.getcwd()) +from backend.app.db.base import Base + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +target_metadata = Base.metadata + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. 
+ + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. + + """ + import asyncio + + async def process_migrations(): + connectable = engine_from_config( + config.get_section(config.config_ini_section, {}), + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + context.configure( + connection=connection, target_metadata=target_metadata + ) + + async with context.begin_transaction(): + context.run_migrations() + + asyncio.run(process_migrations()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/backend/app/db/migrations/script.py.mako b/backend/app/db/migrations/script.py.mako new file mode 100644 index 00000000..fbc4b07d --- /dev/null +++ b/backend/app/db/migrations/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/backend/app/db/models/__pycache__/report_state.cpython-313-pytest-8.2.0.pyc b/backend/app/db/models/__pycache__/report_state.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index cd774a64..00000000 Binary files a/backend/app/db/models/__pycache__/report_state.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/db/models/report_state.py b/backend/app/db/models/report_state.py index d3867438..2cdee436 100644 --- a/backend/app/db/models/report_state.py +++ b/backend/app/db/models/report_state.py @@ -10,6 +10,14 @@ class ReportStatusEnum(PyEnum): RUNNING = "running" FAILED = "failed" COMPLETED = "completed" + RUNNING_AGENTS = "running_agents" + AGENTS_COMPLETED = "agents_completed" + AGENTS_FAILED = "agents_failed" + AGENTS_PARTIAL_SUCCESS = "agents_partial_success" + GENERATING_NLG = "generating_nlg" + NLG_COMPLETED = "nlg_completed" + GENERATING_SUMMARY = "generating_summary" + SUMMARY_COMPLETED = "summary_completed" @@ -24,3 +32,4 @@ class ReportState(Base): partial_agent_output = Column(JSON, nullable=True) # Stores partial outputs from agents raw_data = Column(JSON, nullable=True) # Stores raw data collected by agents final_report_json = Column(JSON, nullable=True) # Stores the final generated report JSON + error_message = Column(String, nullable=True) # New column for error messages diff --git a/backend/app/db/repositories/__pycache__/__init__.cpython-313.pyc b/backend/app/db/repositories/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 00000000..bc11a447 Binary files /dev/null and 
b/backend/app/db/repositories/__pycache__/__init__.cpython-313.pyc differ diff --git a/backend/app/db/repositories/__pycache__/report_repository.cpython-313.pyc b/backend/app/db/repositories/__pycache__/report_repository.cpython-313.pyc new file mode 100644 index 00000000..0e18a824 Binary files /dev/null and b/backend/app/db/repositories/__pycache__/report_repository.cpython-313.pyc differ diff --git a/backend/app/db/repositories/report_repository.py b/backend/app/db/repositories/report_repository.py index c2944eca..6b54c930 100644 --- a/backend/app/db/repositories/report_repository.py +++ b/backend/app/db/repositories/report_repository.py @@ -1,71 +1,88 @@ +from typing import Callable, Dict, Any from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy import select, update from sqlalchemy.exc import IntegrityError from backend.app.db.models.report import Report from backend.app.db.models.report_state import ReportState, ReportStatusEnum -from typing import Dict, Any class ReportRepository: - def __init__(self, session: AsyncSession): - self.session = session + def __init__(self, session_factory: Callable[..., AsyncSession]): + self.session_factory = session_factory async def create_report_entry(self, report_id: str) -> Report: - try: - report = Report(id=report_id) - self.session.add(report) - report_state = ReportState(report_id=report_id, status=ReportStatusEnum.PENDING) - self.session.add(report_state) - await self.session.commit() - await self.session.refresh(report) - return report - except IntegrityError: - await self.session.rollback() - # If a report with this ID already exists, fetch and return it - existing_report = await self.session.execute(select(Report).where(Report.id == report_id)) - report = existing_report.scalar_one_or_none() - if report: + async with self.session_factory() as session: + try: + report = Report(id=report_id) + session.add(report) + report_state = ReportState(report_id=report_id, status=ReportStatusEnum.PENDING) + 
session.add(report_state) + await session.commit() + await session.refresh(report) return report - else: - # This case should ideally not be reached if IntegrityError is due to report_id + except IntegrityError: + await session.rollback() + # If a report with this ID already exists, fetch and return it + existing_report = await session.execute(select(Report).where(Report.id == report_id)) + report = existing_report.scalar_one_or_none() + if report: + return report + else: + # This case should ideally not be reached if IntegrityError is due to report_id + raise + except Exception: + await session.rollback() raise - except Exception: - await self.session.rollback() - raise async def update_report_status(self, report_id: str, status: ReportStatusEnum) -> ReportState | None: - try: - stmt = update(ReportState).where(ReportState.report_id == report_id).values(status=status).returning(ReportState) - result = await self.session.execute(stmt) - updated_report_state = result.scalar_one_or_none() - await self.session.commit() - return updated_report_state - except Exception: - await self.session.rollback() - raise + async with self.session_factory() as session: + try: + stmt = update(ReportState).where(ReportState.report_id == report_id).values(status=status).returning(ReportState) + result = await session.execute(stmt) + updated_report_state = result.scalar_one_or_none() + await session.commit() + return updated_report_state + except Exception: + await session.rollback() + raise async def store_partial_report_results(self, report_id: str, partial_data: Dict[str, Any]) -> ReportState | None: - try: - stmt = update(ReportState).where(ReportState.report_id == report_id).values(partial_agent_output=partial_data).returning(ReportState) - result = await self.session.execute(stmt) - updated_report_state = result.scalar_one_or_none() - await self.session.commit() - return updated_report_state - except Exception: - await self.session.rollback() - raise + async with 
self.session_factory() as session: + try: + stmt = update(ReportState).where(ReportState.report_id == report_id).values(partial_agent_output=partial_data).returning(ReportState) + result = await session.execute(stmt) + updated_report_state = result.scalar_one_or_none() + await session.commit() + return updated_report_state + except Exception: + await session.rollback() + raise async def save_final_report(self, report_id: str, data: Dict[str, Any]) -> ReportState | None: - try: - stmt = update(ReportState).where(ReportState.report_id == report_id).values(final_report_json=data, status=ReportStatusEnum.COMPLETED).returning(ReportState) - result = await self.session.execute(stmt) - updated_report_state = result.scalar_one_or_none() - await self.session.commit() - return updated_report_state - except Exception: - await self.session.rollback() - raise + async with self.session_factory() as session: + try: + stmt = update(ReportState).where(ReportState.report_id == report_id).values(final_report_json=data, status=ReportStatusEnum.COMPLETED).returning(ReportState) + result = await session.execute(stmt) + updated_report_state = result.scalar_one_or_none() + await session.commit() + return updated_report_state + except Exception: + await session.rollback() + raise - async def get_report_state(self, report_id: str) -> ReportState | None: - stmt = select(ReportState).where(ReportState.report_id == report_id) - result = await self.session.execute(stmt) - return result.scalar_one_or_none() + async def get_report_by_id(self, report_id: str) -> ReportState | None: + async with self.session_factory() as session: + stmt = select(ReportState).where(ReportState.report_id == report_id) + result = await session.execute(stmt) + return result.scalar_one_or_none() + + async def update_partial(self, report_id: str, data: Dict[str, Any]) -> ReportState | None: + async with self.session_factory() as session: + try: + stmt = update(ReportState).where(ReportState.report_id == 
report_id).values(**data).returning(ReportState) + result = await session.execute(stmt) + updated_report_state = result.scalar_one_or_none() + await session.commit() + return updated_report_state + except Exception: + await session.rollback() + raise diff --git a/backend/app/services/__pycache__/report_processor.cpython-313-pytest-8.4.2.pyc b/backend/app/services/__pycache__/report_processor.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index f1519e24..00000000 Binary files a/backend/app/services/__pycache__/report_processor.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/__pycache__/report_processor.cpython-313.pyc b/backend/app/services/__pycache__/report_processor.cpython-313.pyc deleted file mode 100644 index bc69aff3..00000000 Binary files a/backend/app/services/__pycache__/report_processor.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/agents/__pycache__/price_agent.cpython-313.pyc b/backend/app/services/agents/__pycache__/price_agent.cpython-313.pyc deleted file mode 100644 index 9bfa24ac..00000000 Binary files a/backend/app/services/agents/__pycache__/price_agent.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/agents/__pycache__/trend_agent.cpython-313.pyc b/backend/app/services/agents/__pycache__/trend_agent.cpython-313.pyc deleted file mode 100644 index a1438f6f..00000000 Binary files a/backend/app/services/agents/__pycache__/trend_agent.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/agents/__pycache__/volume_agent.cpython-313.pyc b/backend/app/services/agents/__pycache__/volume_agent.cpython-313.pyc deleted file mode 100644 index 3f7fb35b..00000000 Binary files a/backend/app/services/agents/__pycache__/volume_agent.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.2.0.pyc 
b/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 2c1b009b..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.4.2.pyc b/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 009a4fa6..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_code_audit_agent.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.2.0.pyc b/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 10f6adbe..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc b/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 9d39a983..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.2.0.pyc b/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 0ba0d8a2..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.4.2.pyc 
b/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index a4f74401..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_social_sentiment_agent.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.2.0.pyc b/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index d75fe2bd..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.4.2.pyc b/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 8ffc8568..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.2.0.pyc b/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index f8f4732b..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.4.2.pyc b/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 956ef0e4..00000000 Binary files a/backend/app/services/agents/tests/__pycache__/test_team_doc_agent_new_feature.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git 
a/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.2.0.pyc b/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index a9599917..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.4.2.pyc b/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 6838bcdb..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_llm_client.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.2.0.pyc b/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 4a5e5954..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc b/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index eb0cb5f0..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.2.0.pyc b/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index ced67d84..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc 
b/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 22bc55c8..00000000 Binary files a/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/report_processor.py b/backend/app/services/report_processor.py index 74b04822..73c2715f 100644 --- a/backend/app/services/report_processor.py +++ b/backend/app/services/report_processor.py @@ -1,18 +1,20 @@ import asyncio import logging -from backend.app.core.orchestrator import AIOrchestrator +from backend.app.core.orchestrator import Orchestrator from backend.app.services.agents.price_agent import run as price_agent_run from backend.app.services.agents.trend_agent import run as trend_agent_run from backend.app.services.agents.volume_agent import run as volume_agent_run from backend.app.core.storage import save_report_data, set_report_status, try_set_processing from backend.app.services.nlg.report_nlg_engine import ReportNLGEngine from backend.app.services.summary.report_summary_engine import ReportSummaryEngine +from backend.app.db.repositories.report_repository import ReportRepository +from backend.app.db.models.report_state import ReportStatusEnum # Import ReportStatusEnum logger = logging.getLogger(__name__) -async def process_report(report_id: str, token_id: str) -> bool: +async def process_report(report_id: str, token_id: str, report_repository: ReportRepository) -> bool: """ Simulates a background report generation process. Updates report_status to 'processing' and then to 'completed' on success. 
@@ -31,7 +33,7 @@ async def process_report(report_id: str, token_id: str) -> bool: logger.info("Processing report %s for token %s", report_id, token_id) try: - orchestrator = AIOrchestrator() + orchestrator = Orchestrator(report_repository.session) orchestrator.register_agent("price_agent", price_agent_run) orchestrator.register_agent("trend_agent", trend_agent_run) orchestrator.register_agent("volume_agent", volume_agent_run) @@ -40,19 +42,33 @@ async def process_report(report_id: str, token_id: str) -> bool: combined_report_data = orchestrator.aggregate_results(agent_results) # Generate NLG outputs + await report_repository.update_report_status(report_id, ReportStatusEnum.GENERATING_NLG) nlg_engine = ReportNLGEngine() - nlg_outputs = await nlg_engine.generate_nlg_outputs(combined_report_data) + try: + nlg_outputs = await nlg_engine.generate_nlg_outputs(combined_report_data) + await report_repository.update_report_status(report_id, ReportStatusEnum.NLG_COMPLETED) + except Exception as e: + logger.exception("Error generating NLG outputs for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.FAILED, "error": str(e)}) + raise # Generate summary + await report_repository.update_report_status(report_id, ReportStatusEnum.GENERATING_SUMMARY) summary_engine = ReportSummaryEngine() - scores_input = { - "tokenomics_data": combined_report_data.get("tokenomics", {}), - "sentiment_data": combined_report_data.get("social_sentiment", {}), - "code_audit_data": combined_report_data.get("code_audit", {}), - "team_data": combined_report_data.get("team_documentation", {}) - } - scores = summary_engine.generate_scores(scores_input) - final_narrative_summary = summary_engine.build_final_summary(nlg_outputs, scores) + try: + scores_input = { + "tokenomics_data": combined_report_data.get("tokenomics", {}), + "sentiment_data": combined_report_data.get("social_sentiment", {}), + "code_audit_data": combined_report_data.get("code_audit", 
{}), + "team_data": combined_report_data.get("team_documentation", {}) + } + scores = summary_engine.generate_scores(scores_input) + final_narrative_summary = summary_engine.build_final_summary(nlg_outputs, scores) + await report_repository.update_report_status(report_id, ReportStatusEnum.SUMMARY_COMPLETED) + except Exception as e: + logger.exception("Error generating summary for report %s", report_id) + await report_repository.update_partial(report_id, {"status": ReportStatusEnum.FAILED, "error": str(e)}) + raise # Determine overall status based on agent results overall_status = "completed" @@ -67,18 +83,18 @@ async def process_report(report_id: str, token_id: str) -> bool: } # Save the combined_report_data first - save_report_data(report_id, combined_report_data, update_status=False) + await report_repository.store_partial_report_results(report_id, combined_report_data) # Then save the final report content - save_report_data(report_id, final_report_content, key="final_report", update_status=False) + await report_repository.save_final_report(report_id, final_report_content) - set_report_status(report_id, overall_status) + await report_repository.update_report_status(report_id, overall_status) logger.info("Report %s %s.", report_id, overall_status) return True except asyncio.CancelledError: - set_report_status(report_id, "cancelled") + await report_repository.update_report_status(report_id, ReportStatusEnum.FAILED) raise except Exception: logger.exception("Error processing report %s for token %s", report_id, token_id) - set_report_status(report_id, "failed") + await report_repository.update_report_status(report_id, ReportStatusEnum.FAILED) raise \ No newline at end of file diff --git a/backend/app/services/report_service.py b/backend/app/services/report_service.py index 744f992a..d75a2e7f 100644 --- a/backend/app/services/report_service.py +++ b/backend/app/services/report_service.py @@ -1,46 +1,34 @@ from backend.app.models.report_models import ReportRequest, 
ReportResponse from backend.app.utils.id_generator import generate_report_id -from typing import Dict +from typing import Dict, Any from backend.app.core.logger import services_logger +from backend.app.db.repositories.report_repository import ReportRepository +from backend.app.db.models.report_state import ReportStatusEnum -# In-memory storage for reports (to be replaced with persistent storage) -in_memory_reports: Dict[str, Dict] = {} - -async def generate_report(request: ReportRequest) -> ReportResponse: +async def generate_report(request: ReportRequest, report_repository: ReportRepository) -> ReportResponse: services_logger.info(f"Generating new report for token_id: {request.token_id}") report_id = generate_report_id() - # Store a placeholder report object - in_memory_reports[report_id] = { - "token_id": request.token_id, - "parameters": request.parameters, - "status": "processing", - "report_id": report_id - } - return ReportResponse(report_id=report_id, status="processing") - -async def save_report_data(report_id: str, data: Dict): - if report_id in in_memory_reports: - services_logger.info(f"Saving data for report_id: {report_id}") - in_memory_reports[report_id].update(data) - else: - # Handle case where report_id does not exist, or log a warning - services_logger.warning("Report ID %s not found for saving data.", report_id) + await report_repository.create_report_entry(report_id) + return ReportResponse(report_id=report_id, status=ReportStatusEnum.PENDING.value) -def get_report_status_from_memory(report_id: str) -> Dict | None: - services_logger.info(f"Retrieving status for report_id: {report_id} from memory.") - return in_memory_reports.get(report_id) +async def get_report_status(report_id: str, report_repository: ReportRepository) -> Dict[str, Any] | None: + services_logger.info(f"Retrieving status for report_id: {report_id} from database.") + report = await report_repository.get_report_state(report_id) + if report: + return {"report_id": report.report_id, 
"status": report.status.value} + return None -def get_report_data(report_id: str) -> Dict | None: +async def get_report_data(report_id: str, report_repository: ReportRepository) -> Dict[str, Any] | None: services_logger.info(f"Attempting to retrieve data for report_id: {report_id}") - report = in_memory_reports.get(report_id) + report = await report_repository.get_report_state(report_id) if not report: services_logger.warning(f"Report with id {report_id} not found when attempting to retrieve data.") return None - if report.get("status") == "completed": + if report.status == ReportStatusEnum.COMPLETED: services_logger.info(f"Report {report_id} is completed, returning data.") return { - "report_id": report_id, - "data": report.get("data", {}), + "report_id": report.report_id, + "data": report.final_report_json if report.final_report_json else report.partial_agent_output, } - services_logger.info(f"Report {report_id} is in status: {report.get("status")}, returning status only.") - return {"report_id": report_id, "status": report.get("status")} + services_logger.info(f"Report {report_id} is in status: {report.status.value}, returning status only.") + return {"report_id": report.report_id, "status": report.status.value, "detail": report.error_message} diff --git a/backend/app/services/summary/report_summary_engine.py b/backend/app/services/summary/report_summary_engine.py index c3f9d1a4..50316650 100644 --- a/backend/app/services/summary/report_summary_engine.py +++ b/backend/app/services/summary/report_summary_engine.py @@ -22,7 +22,7 @@ def generate_scores(self, data: Dict[str, Any]) -> Dict[str, float]: # Code Maturity Score # Assuming 'code_audit_data' contains 'lines_of_code', 'test_coverage', 'bug_density' code_audit_data = data.get("code_audit_data", {}) - lines_of_code = code_audit_data.get("lines_of_code", 1000) # Placeholder + test_coverage = code_audit_data.get("test_coverage", 0.7) # Placeholder bug_density = code_audit_data.get("bug_density", 0.1) # 
Placeholder # Simple rule: higher coverage, lower bug density, reasonable LOC diff --git a/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.2.0.pyc b/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index ad9e5527..00000000 Binary files a/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.4.2.pyc b/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index d62a389e..00000000 Binary files a/backend/app/services/summary/tests/__pycache__/test_report_summary_engine.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.2.0.pyc b/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 6198a918..00000000 Binary files a/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.4.2.pyc b/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 94cb2a90..00000000 Binary files a/backend/app/services/summary/tests/__pycache__/test_summary_engine_advanced.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc b/backend/app/services/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 1de64a5f..00000000 Binary files 
a/backend/app/services/tests/__pycache__/test_onchain_agent.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/services/validation/__pycache__/__init__.cpython-313.pyc b/backend/app/services/validation/__pycache__/__init__.cpython-313.pyc deleted file mode 100644 index 7d055527..00000000 Binary files a/backend/app/services/validation/__pycache__/__init__.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/validation/__pycache__/validation_engine.cpython-313.pyc b/backend/app/services/validation/__pycache__/validation_engine.cpython-313.pyc deleted file mode 100644 index d456c734..00000000 Binary files a/backend/app/services/validation/__pycache__/validation_engine.cpython-313.pyc and /dev/null differ diff --git a/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.2.0.pyc b/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 689f52c3..00000000 Binary files a/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.4.2.pyc b/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index e1d2e8b6..00000000 Binary files a/backend/app/services/validation/tests/__pycache__/test_validation_engine.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.2.0.pyc b/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 3dbdfb89..00000000 Binary files a/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git 
a/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc b/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index e5611a8c..00000000 Binary files a/backend/app/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/main.py b/backend/main.py index 967cf581..57c2ce27 100644 --- a/backend/main.py +++ b/backend/main.py @@ -4,6 +4,7 @@ from backend.app.api.v1.routes import router as v1_router from backend.app.core.exceptions import ReportNotFoundException, AgentExecutionException from backend.app.core.logger import api_logger +from backend.app.core.orchestrator import create_orchestrator, Orchestrator from dotenv import load_dotenv @@ -11,6 +12,8 @@ app = FastAPI(title=settings.APP_NAME, debug=settings.DEBUG) +orchestrator_instance: Orchestrator | None = None + @app.exception_handler(ReportNotFoundException) async def report_not_found_exception_handler(request: Request, exc: ReportNotFoundException): api_logger.error(f"ReportNotFoundException: {exc.detail}") @@ -57,6 +60,12 @@ async def generic_exception_handler(request: Request, exc: Exception): app.include_router(v1_router, prefix="/api/v1") +@app.on_event("startup") +async def startup_event(): + global orchestrator_instance + orchestrator_instance = await create_orchestrator() + api_logger.info("Orchestrator instance initialized.") + @app.get("/health") async def health_check(): return {"status": "ok"} diff --git a/backend/tests/__pycache__/test_orchestrator.cpython-313-pytest-8.4.2.pyc b/backend/tests/__pycache__/test_orchestrator.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 8340d2fd..00000000 Binary files a/backend/tests/__pycache__/test_orchestrator.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.2.0.pyc 
b/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 8498aa2f..00000000 Binary files a/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.4.2.pyc b/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 2a2dcc16..00000000 Binary files a/backend/tests/__pycache__/test_orchestrator_config.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.2.0.pyc b/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 423d994d..00000000 Binary files a/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.4.2.pyc b/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 04e50306..00000000 Binary files a/backend/tests/__pycache__/test_orchestrator_integration.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc b/backend/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index fc9dab20..00000000 Binary files a/backend/tests/__pycache__/test_report_processor.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.2.0.pyc b/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.2.0.pyc deleted file mode 100644 index 3dc2e382..00000000 Binary files a/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.2.0.pyc and /dev/null differ diff --git 
a/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.4.2.pyc b/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.4.2.pyc deleted file mode 100644 index 71b19b0c..00000000 Binary files a/backend/tests/__pycache__/test_routes.cpython-313-pytest-8.4.2.pyc and /dev/null differ diff --git a/requirements.txt b/requirements.txt index 1f866035..dfe69738 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ fastapi==0.121.3 uvicorn==0.23.2 -SQLAlchemy==2.0.19 +SQLAlchemy==2.0.31 pydantic==2.8.0 pydantic-settings==2.11.0 python-dotenv==1.0.0 @@ -9,4 +9,8 @@ pytest-asyncio==0.24.0 httpx==0.25.0 alembic==1.12.0 ruff==0.1.4 -asyncpg==0.30.0 \ No newline at end of file +asyncpg==0.30.0 +tenacity==9.1.2 +textblob==0.19.0 +requests==2.32.5 +beautifulsoup4==4.14.2 \ No newline at end of file