diff --git a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc b/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc
index 62635c88..eb0cb5f0 100644
Binary files a/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc and b/backend/app/services/nlg/tests/__pycache__/test_nlg_engine.cpython-313-pytest-8.4.2.pyc differ
diff --git a/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc b/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc
new file mode 100644
index 00000000..22bc55c8
Binary files /dev/null and b/backend/app/services/nlg/tests/__pycache__/test_report_nlg_engine.cpython-313-pytest-8.4.2.pyc differ
diff --git a/backend/app/services/nlg/tests/test_nlg_engine.py b/backend/app/services/nlg/tests/test_nlg_engine.py
index 04ae62e4..29be220b 100644
--- a/backend/app/services/nlg/tests/test_nlg_engine.py
+++ b/backend/app/services/nlg/tests/test_nlg_engine.py
@@ -1,300 +1,217 @@
 import pytest
-import respx
-from httpx import Response
-import os
 import json
-from typing import Dict, Any
-
+from unittest.mock import AsyncMock, patch
 from backend.app.services.nlg.nlg_engine import NLGEngine
 from backend.app.services.nlg.llm_client import LLMClient
+from backend.app.services.nlg.prompt_templates import get_template, fill_template
 
-# Concrete implementation of NLGEngine for testing
+# Concrete implementation for testing purposes
 class ConcreteNLGEngine(NLGEngine):
     async def generate_section_text(self, section_id: str, raw_data: dict) -> str:
-        return self._format_output({"section_id": section_id, "text": "Mocked base section text."})
-
-    async def generate_full_report(self, data: dict) -> str:
-        sections = []
-        sections.append(json.loads(await super().generate_tokenomics_text(data.get("tokenomics_data", {}))))
-        sections.append(json.loads(await super().generate_onchain_text(data.get("onchain_data", {}))))
-        sections.append(json.loads(await super().generate_sentiment_text(data.get("sentiment_data", {}))))
-        sections.append(json.loads(await super().generate_code_audit_text(data.get("code_data", {}), data.get("audit_data", {}))))
-        return self._format_output({"sections": sections})
-
-# Mock the environment variable for testing
-@pytest.fixture(autouse=True)
-def mock_env_vars():
-    os.environ["OPENAI_API_KEY"] = "test_api_key"
-    yield
-    del os.environ["OPENAI_API_KEY"]
+        # This is a placeholder implementation for the abstract method
+        return json.dumps({"section_id": section_id, "text": "Generated by ConcreteNLGEngine"})
+
+# Mock the LLMClient for all tests in this module
+@pytest.fixture
+def mock_llm_client():
+    with patch('backend.app.services.nlg.nlg_engine.LLMClient', autospec=True) as MockLLMClient:
+        mock_instance = MockLLMClient.return_value
+        mock_instance.__aenter__.return_value = mock_instance
+        mock_instance.__aexit__.return_value = None
+        mock_instance.generate_text = AsyncMock()
+        yield mock_instance
+
+@pytest.fixture
+def nlg_engine():
+    return ConcreteNLGEngine()
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_success():
-    engine = ConcreteNLGEngine()  # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
+async def test_generate_tokenomics_text_success(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated tokenomics summary."}}]
     }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "On-chain metrics show strong activity with 1000 active addresses and 500 holders. Transaction flows are at 10M USD and liquidity is 20M USD."}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "On-chain metrics show strong activity" in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"supply": "1B", "distribution": "fair"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "This is a generated tokenomics summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("tokenomics")
+    expected_prompt = fill_template(
+        expected_template,
+        data=json.dumps(raw_data, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_empty_data():
-    engine = ConcreteNLGEngine()  # Use the concrete implementation
-    raw_data = {}
-
-    response = await engine.generate_onchain_text(raw_data)
-    parsed_response = json.loads(response)
-
-    assert parsed_response["section_id"] == "onchain_metrics"
-    assert "On-chain metrics data is not available at this time." in parsed_response["text"]
+async def test_generate_tokenomics_text_missing_data(nlg_engine):
+    result = await nlg_engine.generate_tokenomics_text({})
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Tokenomics data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_incomplete_data():
-    engine = ConcreteNLGEngine()  # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-    }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "On-chain metrics show strong activity with 1000 active addresses and 500 holders. Transaction flows and liquidity data are not available."}}]
+async def test_generate_tokenomics_text_empty_llm_response(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": ""}}]
     }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "On-chain metrics show strong activity" in parsed_response["text"]
-        assert "Transaction flows and liquidity data are not available." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"supply": "1B"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Failed to generate tokenomics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_llm_error():
-    engine = ConcreteNLGEngine()  # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error")
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "Failed to generate on-chain metrics summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_tokenomics_text_llm_exception(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    raw_data = {"supply": "1B"}
+
+    result = await nlg_engine.generate_tokenomics_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "tokenomics",
+        "text": "Failed to generate tokenomics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_onchain_text_llm_empty_content():
-    engine = ConcreteNLGEngine()  # Use the concrete implementation
-    raw_data = {
-        "active_addresses": 1000,
-        "holders": 500,
-        "transaction_flows": "10M USD",
-        "liquidity": "20M USD"
+async def test_generate_onchain_text_success(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated on-chain metrics summary."}}]
     }
-    expected_llm_response = {
-        "choices": [{"message": {"content": ""}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_onchain_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "onchain_metrics"
-        assert "Failed to generate on-chain metrics summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"active_addresses": 1000, "holders": 500, "transaction_flows": "high", "liquidity": "good"}
+
+    result = await nlg_engine.generate_onchain_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "This is a generated on-chain metrics summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("onchain_metrics")
+    expected_prompt = fill_template(
+        expected_template,
+        data=json.dumps({
+            "active_addresses": 1000,
+            "holders": 500,
+            "transaction_flows": "high",
+            "liquidity": "good",
+        }, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
 
 @pytest.mark.asyncio
-async def test_generate_sentiment_text_success():
-    engine = ConcreteNLGEngine()
-    raw_data = {
-        "overall_sentiment_score": 0.75,
-        "community_perception": "positive",
-        "trends": ["growing adoption", "strong community engagement"],
-        "direction": "upward"
-    }
-    expected_llm_response = {
-        "choices": [{"message": {"content": "Overall sentiment is highly positive (0.75) with a strong upward community direction. Key trends include growing adoption and strong community engagement."}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_sentiment_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "social_sentiment"
-        assert "Overall sentiment is highly positive (0.75)" in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_onchain_text_missing_data(nlg_engine):
+    result = await nlg_engine.generate_onchain_text({})
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "On-chain metrics data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_sentiment_text_empty_data():
-    engine = ConcreteNLGEngine()
-    raw_data = {}
-
-    response = await engine.generate_sentiment_text(raw_data)
-    parsed_response = json.loads(response)
-
-    assert parsed_response["section_id"] == "social_sentiment"
-    assert "Social sentiment data is not available at this time." in parsed_response["text"]
+async def test_generate_onchain_text_failed_status(nlg_engine):
+    result = await nlg_engine.generate_onchain_text({"status": "failed"})
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "On-chain metrics data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_sentiment_text_llm_error():
-    engine = ConcreteNLGEngine()
-    raw_data = {
-        "overall_sentiment_score": 0.75,
-        "community_perception": "positive"
+async def test_generate_onchain_text_empty_llm_response(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": ""}}]
     }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error")
-
-        response = await engine.generate_sentiment_text(raw_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "social_sentiment"
-        assert "Failed to generate social sentiment summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
-
-        assert parsed_response["section_id"] == "social_sentiment"
-        assert "Failed to generate social sentiment summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"active_addresses": 1000}
+
+    result = await nlg_engine.generate_onchain_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "Failed to generate on-chain metrics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_code_audit_text_success():
-    engine = ConcreteNLGEngine()
-    code_data = {"files": [{"name": "main.py", "lines": 100}]}
-    audit_data = {"risks": [{"type": "security", "severity": "high"}]}
-    expected_llm_response = {
-        "choices": [{"message": {"content": "Code audit summary: Clarity is good. High security risk identified. Recent code activity. Good repository quality."}}]
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_code_audit_text(code_data, audit_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "code_audit_summary"
-        assert "Code audit summary: Clarity is good." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_onchain_text_llm_exception(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    raw_data = {"active_addresses": 1000}
+
+    result = await nlg_engine.generate_onchain_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "onchain_metrics",
+        "text": "Failed to generate on-chain metrics summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_code_audit_text_empty_data():
-    engine = ConcreteNLGEngine()
-    code_data = {}
-    audit_data = {}
-
-    response = await engine.generate_code_audit_text(code_data, audit_data)
-    parsed_response = json.loads(response)
-
-    assert parsed_response["section_id"] == "code_audit_summary"
-    assert "Code audit and repository data are not available at this time." in parsed_response["text"]
+async def test_generate_sentiment_text_success(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated social sentiment summary."}}]
+    }
+    raw_data = {"sentiment_score": 0.8, "trends": "positive"}
+
+    result = await nlg_engine.generate_sentiment_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "social_sentiment",
+        "text": "This is a generated social sentiment summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("social_sentiment")
+    expected_prompt = fill_template(
+        expected_template,
+        data=json.dumps(raw_data, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
 
 @pytest.mark.asyncio
-async def test_generate_code_audit_text_llm_error():
-    engine = ConcreteNLGEngine()
-    code_data = {"files": [{"name": "main.py", "lines": 100}]}
-    audit_data = {"risks": [{"type": "security", "severity": "high"}]}
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(500, text="Internal Server Error")
-
-        response = await engine.generate_code_audit_text(code_data, audit_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "code_audit_summary"
-        assert "Failed to generate code audit summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+async def test_generate_sentiment_text_missing_data(nlg_engine):
+    result = await nlg_engine.generate_sentiment_text({})
+    expected_output = json.dumps({
+        "section_id": "social_sentiment",
+        "text": "Social sentiment data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_code_audit_text_llm_empty_content():
-    engine = ConcreteNLGEngine()
-    code_data = {"files": [{"name": "main.py", "lines": 100}]}
-    audit_data = {"risks": [{"type": "security", "severity": "high"}]}
-    expected_llm_response = {
+async def test_generate_sentiment_text_empty_llm_response(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.return_value = {
         "choices": [{"message": {"content": ""}}]
     }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").return_value = Response(200, json=expected_llm_response)
-
-        response = await engine.generate_code_audit_text(code_data, audit_data)
-        parsed_response = json.loads(response)
-
-        assert parsed_response["section_id"] == "code_audit_summary"
-        assert "Failed to generate code audit summary due to an internal error." in parsed_response["text"]
-        assert respx_mock.calls.call_count == 1
+    raw_data = {"sentiment_score": 0.8}
+
+    result = await nlg_engine.generate_sentiment_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "social_sentiment",
+        "text": "Failed to generate social sentiment summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
 
 @pytest.mark.asyncio
-async def test_generate_full_report_success():
-    engine = ConcreteNLGEngine()
+async def test_generate_sentiment_text_llm_exception(mock_llm_client, nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    raw_data = {"sentiment_score": 0.8}
 
-    mock_tokenomics_data = {"key": "value"}
-    mock_onchain_data = {"active_addresses": 1000}
-    mock_sentiment_data = {"score": 0.8}
-    mock_code_data = {"files": ["file1.py"]}
-    mock_audit_data = {"risks": ["high"]}
-
-    full_report_data = {
-        "tokenomics_data": mock_tokenomics_data,
-        "onchain_data": mock_onchain_data,
-        "sentiment_data": mock_sentiment_data,
-        "code_data": mock_code_data,
-        "audit_data": mock_audit_data,
-    }
-
-    with respx.mock as respx_mock:
-        respx_mock.post("https://api.openai.com/v1/chat/completions").side_effect = [
-            Response(200, json={"choices": [{"message": {"content": "Mocked tokenomics text."}}]}),
-            Response(200, json={"choices": [{"message": {"content": "Mocked on-chain text."}}]}),
-            Response(200, json={"choices": [{"message": {"content": "Mocked sentiment text."}}]}),
-            Response(200, json={"choices": [{"message": {"content": "Mocked code audit text."}}]}),
-        ]
-
-        response = await engine.generate_full_report(full_report_data)
-        parsed_response = json.loads(response)
-
-        assert "sections" in parsed_response
-        assert len(parsed_response["sections"]) == 4
-
-        section_ids = [s["section_id"] for s in parsed_response["sections"]]
-        assert "tokenomics" in section_ids
-        assert "onchain_metrics" in section_ids
-        assert "social_sentiment" in section_ids
-        assert "code_audit_summary" in section_ids
-
-        for section in parsed_response["sections"]:
-            if section["section_id"] == "tokenomics":
-                assert "Mocked tokenomics text." in section["text"]
-            elif section["section_id"] == "onchain_metrics":
-                assert "Mocked on-chain text." in section["text"]
-            elif section["section_id"] == "social_sentiment":
-                assert "Mocked sentiment text." in section["text"]
-            elif section["section_id"] == "code_audit_summary":
-                assert "Mocked code audit text." in section["text"]
-
-        assert respx_mock.calls.call_count == 4
+    result = await nlg_engine.generate_sentiment_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "social_sentiment",
+        "text": "Failed to generate social sentiment summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
diff --git a/backend/app/services/nlg/tests/test_report_nlg_engine.py b/backend/app/services/nlg/tests/test_report_nlg_engine.py
new file mode 100644
index 00000000..9cd2a133
--- /dev/null
+++ b/backend/app/services/nlg/tests/test_report_nlg_engine.py
@@ -0,0 +1,143 @@
+import pytest
+import json
+from unittest.mock import AsyncMock, patch
+from backend.app.services.nlg.report_nlg_engine import ReportNLGEngine
+from backend.app.services.nlg.llm_client import LLMClient
+from backend.app.services.nlg.prompt_templates import get_template, fill_template
+
+# Mock the LLMClient for all tests in this module
+@pytest.fixture
+def mock_llm_client():
+    with patch('backend.app.services.nlg.report_nlg_engine.LLMClient', autospec=True) as MockLLMClient:
+        mock_instance = MockLLMClient.return_value
+        mock_instance.__aenter__.return_value = mock_instance
+        mock_instance.__aexit__.return_value = None
+        mock_instance.generate_text = AsyncMock()
+        yield mock_instance
+
+@pytest.fixture
+def report_nlg_engine():
+    return ReportNLGEngine()
+
+@pytest.mark.asyncio
+async def test_generate_code_audit_text_success(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated code audit summary."}}]
+    }
+    code_data = {"lines": 100, "files": 10}
+    audit_data = [{"finding": "High severity bug"}]
+
+    result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data)
+
+    expected_output = json.dumps({
+        "section_id": "code_audit_summary",
+        "text": "This is a generated code audit summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("code_audit_summary")
+    expected_prompt = fill_template(
+        expected_template,
+        code_data=json.dumps(code_data, indent=2),
+        audit_data=json.dumps(audit_data, indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
+
+@pytest.mark.asyncio
+async def test_generate_code_audit_text_missing_data(report_nlg_engine):
+    result = await report_nlg_engine.generate_code_audit_text({}, [])
+    expected_output = json.dumps({
+        "section_id": "code_audit_summary",
+        "text": "Code audit and repository data are not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
+
+@pytest.mark.asyncio
+async def test_generate_code_audit_text_empty_llm_response(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": ""}}]
+    }
+    code_data = {"lines": 100}
+    audit_data = [{"finding": "Low"}]
+
+    result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data)
+    expected_output = json.dumps({
+        "section_id": "code_audit_summary",
+        "text": "Failed to generate code audit summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
+
+@pytest.mark.asyncio
+async def test_generate_code_audit_text_llm_exception(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    code_data = {"lines": 100}
+    audit_data = [{"finding": "Low"}]
+
+    result = await report_nlg_engine.generate_code_audit_text(code_data, audit_data)
+    expected_output = json.dumps({
+        "section_id": "code_audit_summary",
+        "text": "Failed to generate code audit summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
+
+@pytest.mark.asyncio
+async def test_generate_team_documentation_text_success(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": "This is a generated team documentation summary."}}]
+    }
+    raw_data = {"team_analysis": ["Strong team"], "whitepaper_summary": {"version": "1.0"}}
+
+    result = await report_nlg_engine.generate_team_documentation_text(raw_data)
+
+    expected_output = json.dumps({
+        "section_id": "team_documentation",
+        "text": "This is a generated team documentation summary."
+    })
+    assert result == expected_output
+    mock_llm_client.generate_text.assert_called_once()
+
+    # Validate prompt correctness
+    expected_template = get_template("team_documentation")
+    expected_prompt = fill_template(
+        expected_template,
+        team_analysis=json.dumps(raw_data["team_analysis"], indent=2),
+        whitepaper_summary=json.dumps(raw_data["whitepaper_summary"], indent=2)
+    )
+    mock_llm_client.generate_text.assert_called_with(expected_prompt)
+
+@pytest.mark.asyncio
+async def test_generate_team_documentation_text_missing_data(report_nlg_engine):
+    result = await report_nlg_engine.generate_team_documentation_text({})
+    expected_output = json.dumps({
+        "section_id": "team_documentation",
+        "text": "Team and documentation data is not available at this time. Please check back later for updates."
+    })
+    assert result == expected_output
+
+@pytest.mark.asyncio
+async def test_generate_team_documentation_text_empty_llm_response(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.return_value = {
+        "choices": [{"message": {"content": ""}}]
+    }
+    raw_data = {"team_analysis": ["Strong team"]}
+
+    result = await report_nlg_engine.generate_team_documentation_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "team_documentation",
+        "text": "Failed to generate team and documentation summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output
+
+@pytest.mark.asyncio
+async def test_generate_team_documentation_text_llm_exception(mock_llm_client, report_nlg_engine):
+    mock_llm_client.generate_text.side_effect = Exception("LLM connection error")
+    raw_data = {"team_analysis": ["Strong team"]}
+
+    result = await report_nlg_engine.generate_team_documentation_text(raw_data)
+    expected_output = json.dumps({
+        "section_id": "team_documentation",
+        "text": "Failed to generate team and documentation summary due to an internal error. Please try again later."
+    })
+    assert result == expected_output