Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified backend/app/api/v1/__pycache__/routes.cpython-313.pyc
Binary file not shown.
Binary file modified backend/app/core/__pycache__/orchestrator.cpython-313.pyc
Binary file not shown.
27 changes: 16 additions & 11 deletions backend/app/core/orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,12 @@ async def execute_agents(self, report_id: str, token_id: str) -> Dict[str, Any]:
for name, task in tasks.items():
try:
result = await asyncio.wait_for(task, timeout=settings.AGENT_TIMEOUT) # Added timeout
results[name] = {"status": "completed", "data": result}
orchestrator_logger.debug(f"Agent {name} task result: {result}")
# Check if the agent itself returned a status
if isinstance(result, dict) and "status" in result:
results[name] = result
else:
results[name] = {"status": "completed", "data": result}
orchestrator_logger.info(f"Agent {name} completed for report {report_id}.")
except asyncio.TimeoutError: # Handle timeout specifically
orchestrator_logger.exception("Agent %s timed out for report %s", name, report_id)
Expand Down Expand Up @@ -159,17 +164,17 @@ async def onchain_data_agent(report_id: str, token_id: str) -> Dict[str, Any]:
# Handle individual task results and exceptions
if isinstance(onchain_metrics_result, asyncio.TimeoutError):
orchestrator_logger.error("Onchain metrics fetch timed out for report %s", report_id)
onchain_metrics_result = {"error": "Onchain metrics fetch timed out"}
onchain_metrics_result = {"status": "failed", "error": "Onchain metrics fetch timed out"}
elif isinstance(onchain_metrics_result, Exception):
orchestrator_logger.error("Onchain metrics fetch failed for report %s", report_id)
onchain_metrics_result = {"error": str(onchain_metrics_result)}
onchain_metrics_result = {"status": "failed", "error": str(onchain_metrics_result)}

if isinstance(tokenomics_result, asyncio.TimeoutError):
orchestrator_logger.error("Tokenomics fetch timed out for report %s", report_id)
tokenomics_result = {"error": "Tokenomics fetch timed out"}
tokenomics_result = {"status": "failed", "error": "Tokenomics fetch timed out"}
elif isinstance(tokenomics_result, Exception):
orchestrator_logger.error("Tokenomics fetch failed for report %s", report_id)
tokenomics_result = {"error": str(tokenomics_result)}
tokenomics_result = {"status": "failed", "error": str(tokenomics_result)}

return {
"onchain_metrics": onchain_metrics_result,
Expand Down Expand Up @@ -197,10 +202,10 @@ async def social_sentiment_agent_func(report_id: str, token_id: str) -> Dict[str
orchestrator_logger.info(f"Social Sentiment Agent completed for report {report_id}.")
except asyncio.TimeoutError:
orchestrator_logger.error("Social Sentiment Agent timed out for report %s", report_id)
social_sentiment_data = {"social_sentiment": {"error": "Agent timed out"}}
social_sentiment_data = {"social_sentiment": {"status": "failed", "error": "Agent timed out"}}
except Exception as e:
orchestrator_logger.exception("Social Sentiment Agent failed for report %s", report_id)
social_sentiment_data = {"social_sentiment": {"error": str(e)}}
social_sentiment_data = {"social_sentiment": {"status": "failed", "error": str(e)}}
return social_sentiment_data
orch.register_agent('social_sentiment_agent', social_sentiment_agent_func)

Expand Down Expand Up @@ -239,10 +244,10 @@ async def team_documentation_agent(report_id: str, token_id: str) -> Dict[str, A

except asyncio.TimeoutError:
orchestrator_logger.error("Team and Documentation Agent timed out for report %s", report_id)
return {"team_documentation": {"error": "Agent timed out"}}
return {"team_documentation": {"status": "failed", "error": "Agent timed out"}}
except Exception as e:
orchestrator_logger.exception("Team and Documentation Agent failed for report %s", report_id)
return {"team_documentation": {"error": str(e)}}
return {"team_documentation": {"status": "failed", "error": str(e)}}

return {
"team_documentation": {
Expand Down Expand Up @@ -287,10 +292,10 @@ async def code_audit_agent_func(report_id: str, token_id: str) -> Dict[str, Any]

except asyncio.TimeoutError:
orchestrator_logger.error("Code/Audit Agent timed out for report %s", report_id)
return {"code_audit": {"error": "Agent timed out", "code_metrics": code_metrics_data, "audit_summary": audit_summary_data}}
return {"code_audit": {"status": "failed", "error": "Agent timed out", "code_metrics": code_metrics_data, "audit_summary": audit_summary_data}}
except Exception as e:
orchestrator_logger.exception("Code/Audit Agent failed for report %s", report_id)
return {"code_audit": {"error": str(e), "code_metrics": code_metrics_data, "audit_summary": audit_summary_data}}
return {"code_audit": {"status": "failed", "error": str(e), "code_metrics": code_metrics_data, "audit_summary": audit_summary_data}}

return {
"code_audit": {
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
14 changes: 8 additions & 6 deletions backend/app/services/agents/tests/test_code_audit_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,12 +108,14 @@ async def test_audit_codebase(code_audit_agent):
respx.get(f"https://api.github.com/search/issues?q=repo%3A{owner}%2F{repo}%2Btype%3Aissue&per_page=1").mock(return_value=Response(200, json={'total_count': 20}))
respx.get(f"https://api.github.com/repos/{owner}/{repo}/pulls?state=all&per_page=1").mock(return_value=Response(200, headers={'link': '<https://api.github.com/repositories/1296269/pulls?state=all&per_page=1&page=2>; rel="next", <https://api.github.com/repositories/1296269/pulls?state=all&per_page=1&page=15>; rel="last"'}, json=[]))

result = await code_audit_agent.audit_codebase(repo_url, project_name)

assert isinstance(result, CodeAuditResult)
assert result.code_metrics.repo_url == repo_url
assert len(result.audit_summaries) == 2
assert project_name in result.audit_summaries[0].report_title
result = await code_audit_agent.fetch_data(repo_url, project_name)

assert isinstance(result, dict)
assert "code_metrics" in result
assert result["code_metrics"]["repo_url"] == repo_url
assert "audit_summaries" in result
assert len(result["audit_summaries"]) == 2
assert project_name in result["audit_summaries"][0]["report_title"]

@pytest.mark.asyncio
async def test_fetch_github_repo_metrics_http_error(code_audit_agent):
Expand Down
18 changes: 9 additions & 9 deletions backend/app/services/agents/tests/test_onchain_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ async def test_fetch_tokenomics_retry_on_rate_limit(mock_async_client):
with patch.object(fetch_tokenomics.retry, 'wait', new=wait_fixed(0.01)), \
patch.object(fetch_tokenomics.retry, 'stop', new=stop_after_attempt(3)):

result = await fetch_tokenomics(url="http://test.com/tokenomics")
result = await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert result == {"data": "tokenomics"}
assert mock_client_instance.get.call_count == 3

Expand All @@ -161,7 +161,7 @@ async def test_fetch_tokenomics_retry_on_timeout(mock_async_client):
with patch.object(fetch_tokenomics.retry, 'wait', new=wait_fixed(0.01)), \
patch.object(fetch_tokenomics.retry, 'stop', new=stop_after_attempt(3)):

result = await fetch_tokenomics(url="http://test.com/tokenomics")
result = await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert result == {"data": "tokenomics"}
assert mock_client_instance.get.call_count == 3

Expand All @@ -182,7 +182,7 @@ async def test_fetch_tokenomics_max_retries_exceeded(mock_async_client):
patch.object(fetch_tokenomics.retry, 'stop', new=stop_after_attempt(3)):

with pytest.raises(OnchainAgentNetworkError):
await fetch_tokenomics(url="http://test.com/tokenomics")
await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert mock_client_instance.get.call_count == 3

@pytest.mark.asyncio
Expand Down Expand Up @@ -234,7 +234,7 @@ async def test_fetch_tokenomics_http_error_raises_onchainagenthttperror(mock_asy
with patch.object(fetch_tokenomics.retry, 'wait', new=wait_fixed(0.01)), \
patch.object(fetch_tokenomics.retry, 'stop', new=stop_after_attempt(3)):
with pytest.raises(OnchainAgentHTTPError) as excinfo:
await fetch_tokenomics(url="http://test.com/tokenomics")
await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert excinfo.value.status_code == 403
assert mock_client_instance.get.call_count == 3 # Retries should still happen

Expand All @@ -252,7 +252,7 @@ async def test_fetch_tokenomics_unexpected_error_raises_onchainagentexception(mo
with patch.object(fetch_tokenomics.retry, 'wait', new=wait_fixed(0.01)), \
patch.object(fetch_tokenomics.retry, 'stop', new=stop_after_attempt(3)):
with pytest.raises(OnchainAgentException):
await fetch_tokenomics(url="http://test.com/tokenomics")
await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert mock_client_instance.get.call_count == 3 # Retries should still happen

# --- New tests for successful fetching and schema validation ---
Expand Down Expand Up @@ -297,7 +297,7 @@ async def test_fetch_tokenomics_success_and_schema(mock_async_client):
}
mock_client_instance.get.return_value = create_mock_response(200, expected_tokenomics)

result = await fetch_tokenomics(url="http://test.com/tokenomics")
result = await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert result == expected_tokenomics
assert "total_supply" in result
assert "circulating_supply" in result
Expand Down Expand Up @@ -346,7 +346,7 @@ async def test_fetch_tokenomics_missing_fields(mock_async_client):
}
mock_client_instance.get.return_value = create_mock_response(200, incomplete_tokenomics)

result = await fetch_tokenomics(url="http://test.com/tokenomics")
result = await fetch_tokenomics(url="http://test.com/tokenomics", token_id="test_token")
assert result == incomplete_tokenomics
assert "total_supply" in result
assert "circulating_supply" not in result
Expand Down Expand Up @@ -381,5 +381,5 @@ async def test_fetch_tokenomics_invalid_token_id(mock_async_client):
mock_client_instance.get.return_value = create_mock_response(404, error_response_data)

with pytest.raises(OnchainAgentHTTPError) as excinfo:
await fetch_tokenomics(url="http://test.com/tokenomics", params={"token_id": "nonexistent"})
assert excinfo.value.status_code == 404
await fetch_tokenomics(url="http://test.com/tokenomics", params={"token_id": "nonexistent"}, token_id="test_token")
assert excinfo.value.status_code == 404
Binary file modified backend/app/services/nlg/__pycache__/llm_client.cpython-313.pyc
Binary file not shown.
78 changes: 78 additions & 0 deletions backend/app/services/nlg/prompt_templates.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
"""
This module contains prompt templates for various report sections and a utility function
to dynamically fill these templates with data.
"""

# Module-level constant: built once at import time instead of being
# reconstructed on every get_template() call (the dict literal is large
# and fully static, so rebuilding it per lookup was pure waste).
_TEMPLATES = {
    "tokenomics": """
Analyze the following tokenomics data and provide a comprehensive summary,
highlighting key aspects such as token distribution, vesting schedules,
inflation/deflation mechanisms, and any potential risks or advantages.
Focus on how these factors impact the long-term value and stability of the token.

Tokenomics Data:
{data}
""",
    "onchain_metrics": """
Examine the provided on-chain metrics and generate an insightful analysis.
Cover aspects like active addresses, transaction volume, whale activity,
and network growth. Explain the implications of these metrics for the
project's health and adoption.

On-chain Metrics Data:
{data}
""",
    "sentiment": """
Review the social sentiment data and summarize the overall market perception
of the project. Identify key themes, positive or negative trends, and
any significant events influencing sentiment. Discuss the potential impact
of this sentiment on the project's future.

Sentiment Data:
{data}
""",
    "team_analysis": """
Analyze the team's background, experience, and contributions based on the
provided data. Assess the team's capability to execute the project roadmap
and highlight any strengths or weaknesses.

Team Analysis Data:
{data}
""",
    "documentation": """
Evaluate the quality and completeness of the project's documentation.
Identify areas of excellence and areas needing improvement. Discuss how
effective documentation contributes to user adoption and developer engagement.

Documentation Data:
{data}
""",
    "code_audit": """
Summarize the findings from the code audit report. Highlight critical
vulnerabilities, security best practices followed, and overall code quality.
Explain the implications of these findings for the project's security and reliability.

Code Audit Data:
{data}
""",
    "risk_factors": """
Based on the provided data, identify and elaborate on the key risk factors
associated with the project. Categorize risks (e.g., technical, market, regulatory)
and discuss their potential impact and mitigation strategies.

Risk Factors Data:
{data}
""",
}


def get_template(section_id: str) -> str:
    """Retrieve the prompt template registered for a report section.

    Args:
        section_id: Identifier of the report section (e.g. ``"tokenomics"``,
            ``"onchain_metrics"``, ``"sentiment"``, ``"team_analysis"``,
            ``"documentation"``, ``"code_audit"``, ``"risk_factors"``).

    Returns:
        The template string containing a ``{data}`` placeholder, or the
        sentinel string ``"No template found for this section ID."`` when
        ``section_id`` is unknown. (Kept as a string rather than an
        exception to preserve the original API contract for callers.)
    """
    return _TEMPLATES.get(section_id, "No template found for this section ID.")

def fill_template(template: str, **kwargs) -> str:
    """Substitute keyword arguments into a prompt template.

    Accepts arbitrary keyword arguments so templates are free to use any
    placeholder names (e.g. ``fill_template(tpl, data=...)``).

    Args:
        template: A format string containing ``{name}`` placeholders.
        **kwargs: Values keyed by placeholder name.

    Returns:
        The template with every placeholder replaced.

    Raises:
        KeyError: If the template references a placeholder not supplied
            in ``kwargs``.
    """
    # format_map(kwargs) is equivalent to format(**kwargs) for a plain
    # dict of keyword arguments.
    return template.format_map(kwargs)
Binary file not shown.
Loading