diff --git a/src/ragas/metrics/_context_recall.py b/src/ragas/metrics/_context_recall.py
index 57069119b..488678079 100644
--- a/src/ragas/metrics/_context_recall.py
+++ b/src/ragas/metrics/_context_recall.py
@@ -58,6 +58,16 @@
                 "Attributed": "1",
             },
         },
+        {
+            "question": """What is the primary fuel for the Sun?""",
+            "context": """NULL""",
+            "answer": """Hydrogen""",
+            "classification": {
+                "statement_1": "The Sun's primary fuel is hydrogen.",
+                "reason": "The context contains no information",
+                "Attributed": "0",
+            },
+        },
     ],
     input_keys=["question", "context", "answer"],
     output_key="classification",
diff --git a/src/ragas/metrics/_faithfulness.py b/src/ragas/metrics/_faithfulness.py
index 9e10c54af..b03ca4f2b 100644
--- a/src/ragas/metrics/_faithfulness.py
+++ b/src/ragas/metrics/_faithfulness.py
@@ -187,7 +187,7 @@ async def _ascore(
             is_async=is_async,
         )
-        assert isinstance(statements, dict), "Invalid JSON response"
+        statements = statements if isinstance(statements, dict) else {}
         p = self._create_nli_prompt(row, statements.get("statements", []))
         nli_result = await self.llm.generate(p, callbacks=callbacks, is_async=is_async)
         json_output = await json_loader.safe_load(
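
For illustration only, a minimal sketch of the fallback pattern the `_faithfulness.py` hunk introduces: a non-dict payload from the JSON loader is coerced to an empty dict instead of tripping an assertion, so scoring proceeds with zero statements. The helper name `extract_statements` is hypothetical and not part of the Ragas codebase.

```python
def extract_statements(parsed):
    """Return the 'statements' list, tolerating malformed LLM output."""
    # Before the patch: assert isinstance(parsed, dict), "Invalid JSON response"
    # After the patch: degrade gracefully to an empty dict.
    parsed = parsed if isinstance(parsed, dict) else {}
    return parsed.get("statements", [])


if __name__ == "__main__":
    print(extract_statements({"statements": ["The Sun's primary fuel is hydrogen."]}))
    print(extract_statements(["unexpected", "list"]))  # -> []
    print(extract_statements(None))                    # -> []
```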