From 55064c2a9423e855757ab9e6532274b9d50e19e0 Mon Sep 17 00:00:00 2001
From: Shahules786
Date: Thu, 8 Feb 2024 16:47:29 -0800
Subject: [PATCH 1/3] ensure dict type

---
 src/ragas/testset/evolutions.py | 7 +++++--
 src/ragas/testset/extractor.py  | 1 +
 src/ragas/testset/filters.py    | 3 +++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/ragas/testset/evolutions.py b/src/ragas/testset/evolutions.py
index 09802a188..90c275e63 100644
--- a/src/ragas/testset/evolutions.py
+++ b/src/ragas/testset/evolutions.py
@@ -180,9 +180,12 @@ async def generate_datarow(
         relevent_contexts_result = await json_loader.safe_load(
             results.generations[0][0].text.strip(), llm=self.generator_llm
         )
-        relevant_context_indices = relevent_contexts_result.get(
-            "relevant_context", None
+        relevant_context_indices = (
+            relevent_contexts_result.get("relevant_context", None)
+            if isinstance(relevent_contexts_result, dict)
+            else None
         )
+
         if relevant_context_indices is None:
             relevant_context = CurrentNodes(
                 root_node=current_nodes.root_node, nodes=current_nodes.nodes
diff --git a/src/ragas/testset/extractor.py b/src/ragas/testset/extractor.py
index 77c586c2e..09154d8cf 100644
--- a/src/ragas/testset/extractor.py
+++ b/src/ragas/testset/extractor.py
@@ -50,6 +50,7 @@ async def extract(self, node: Node, is_async: bool = True) -> t.List[str]:
         keyphrases = await json_loader.safe_load(
             results.generations[0][0].text.strip(), llm=self.llm, is_async=is_async
         )
+        keyphrases = keyphrases if isinstance(keyphrases, dict) else {}
         logger.debug("keyphrases: %s", keyphrases)
         return keyphrases.get("keyphrases", [])
 
diff --git a/src/ragas/testset/filters.py b/src/ragas/testset/filters.py
index 0eb06b77b..9d017b7e5 100644
--- a/src/ragas/testset/filters.py
+++ b/src/ragas/testset/filters.py
@@ -54,6 +54,7 @@ async def filter(self, node: Node) -> t.Dict:
         results = await self.llm.generate(prompt=prompt)
         output = results.generations[0][0].text.strip()
         score = await json_loader.safe_load(output, llm=self.llm)
+        score = score if isinstance(score, dict) else {}
         logger.debug("node filter: %s", score)
         score.update({"score": score.get("score", 0) >= self.threshold})
         return score
@@ -85,6 +86,7 @@ async def filter(self, question: str) -> bool:
         results = await self.llm.generate(prompt=prompt)
         results = results.generations[0][0].text.strip()
         json_results = await json_loader.safe_load(results, llm=self.llm)
+        json_results = json_results if isinstance(json_results, dict) else {}
         logger.debug("filtered question: %s", json_results)
         return json_results.get("verdict") == "1"
 
@@ -117,6 +119,7 @@ async def filter(self, simple_question: str, compressed_question: str) -> bool:
         results = await self.llm.generate(prompt=prompt)
         results = results.generations[0][0].text.strip()
         json_results = await json_loader.safe_load(results, llm=self.llm)
+        json_results = json_results if isinstance(json_results, dict) else {}
         logger.debug("evolution filter: %s", json_results)
         return json_results.get("verdict") == "1"
 

From 19cab75bd48b4c0181ecb35079ef4cc1bf226bc4 Mon Sep 17 00:00:00 2001
From: Kota Iwauchi
Date: Fri, 9 Feb 2024 15:10:39 +0900
Subject: [PATCH 2/3] simple change for context precision

---
 src/ragas/metrics/_context_precision.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ragas/metrics/_context_precision.py b/src/ragas/metrics/_context_precision.py
index c94132273..d3e6c0d57 100644
--- a/src/ragas/metrics/_context_precision.py
+++ b/src/ragas/metrics/_context_precision.py
@@ -18,7 +18,7 @@
 
 CONTEXT_PRECISION = Prompt(
name="context_precision", - instruction="""Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not. """, + instruction="""Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not with json output. """, examples=[ { "question": """What can you tell me about albert Albert Einstein?""", From da6d9ca916fa6f1cd1d4276a92e6b6652952191f Mon Sep 17 00:00:00 2001 From: Shahules786 Date: Fri, 9 Feb 2024 11:24:10 -0800 Subject: [PATCH 3/3] add JSON instruction --- src/ragas/llms/prompt.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/ragas/llms/prompt.py b/src/ragas/llms/prompt.py index b8be3cb1f..3bec5c9b4 100644 --- a/src/ragas/llms/prompt.py +++ b/src/ragas/llms/prompt.py @@ -91,7 +91,12 @@ def to_string(self) -> str: """ Generate the prompt string from the variables. """ - prompt_str = self.instruction + "\n" + added_json_instruction = ( + "\nOutput in only valid JSON format." + if self.output_type.lower() == "json" + else "" + ) + prompt_str = self.instruction + added_json_instruction + "\n" if self.examples: # Format the examples to match the Langchain prompt template