diff --git a/src/ragas/llms/prompt.py b/src/ragas/llms/prompt.py
index b8be3cb1f..3bec5c9b4 100644
--- a/src/ragas/llms/prompt.py
+++ b/src/ragas/llms/prompt.py
@@ -91,7 +91,12 @@ def to_string(self) -> str:
         """
         Generate the prompt string from the variables.
         """
-        prompt_str = self.instruction + "\n"
+        added_json_instruction = (
+            "\nOutput in only valid JSON format."
+            if self.output_type.lower() == "json"
+            else ""
+        )
+        prompt_str = self.instruction + added_json_instruction + "\n"
 
         if self.examples:
             # Format the examples to match the Langchain prompt template
diff --git a/src/ragas/metrics/_context_precision.py b/src/ragas/metrics/_context_precision.py
index c94132273..d3e6c0d57 100644
--- a/src/ragas/metrics/_context_precision.py
+++ b/src/ragas/metrics/_context_precision.py
@@ -18,7 +18,7 @@
 
 CONTEXT_PRECISION = Prompt(
     name="context_precision",
-    instruction="""Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not. """,
+    instruction="""Given question, answer and context verify if the context was useful in arriving at the given answer. Give verdict as "1" if useful and "0" if not with json output. """,
     examples=[
         {
             "question": """What can you tell me about albert Albert Einstein?""",