ValidationError #1545

@amin-kh96

Description

I saw this error:

value is not a valid list (type=type_error.list))
Evaluating:  33%|█████████████████████████████▎                                                          | 1/3 [01:37<03:14, 97.11s/it]Exception raised in Job[0]: ValidationError(1 validation error for LLMResult
generations -> 0
  value is not a valid list (type=type_error.list))
Evaluating:  67%|██████████████████████████████████████████████████████████▋                             | 2/3 [02:20<01:05, 65.71s/it]Exception raised in Job[1]: ValidationError(1 validation error for LLMResult
generations -> 0
  value is not a valid list (type=type_error.list))
Evaluating: 100%|████████████████████████████████████████████████████████████████████████████████████████| 3/3 [02:37<00:00, 52.47s/it] 
RAGAS Evaluation Report:
{'context_utilization': nan}
PS C:\Users\Amin\git\ragas-prototype> 

I think the cause of the error might be somewhere here:

import typing as t

from langchain_core.outputs import Generation, LLMResult
from langchain_core.prompt_values import PromptValue
from ragas.llms import BaseRagasLLM


# Define the custom LLM class
class CustomRagasLLM(BaseRagasLLM):
    def __init__(self, api_key: str = None):
        """
        Initialize the custom LLM, optionally using an API key if necessary.
        """
        self.api_key = api_key

    def _call(self, prompt: str) -> str:
        """
        Process the prompt and return a result. This can be customized to
        use a local model or perform any required logic.
        """
        if not self.api_key:
            return f"Processed: {prompt} (without API key)"
        else:
            # Handle the LLM response if using an API
            return f"Processed: {prompt} (with API key: {self.api_key})"

    def generate_text(
        self,
        prompt: PromptValue,
        n: int = 1,
        temperature: float = 1e-8,
        stop: t.Optional[t.List[str]] = None,
        callbacks: t.List = []
    ) -> LLMResult:
        # Synchronous generation logic
        text = self._call(prompt)
        return LLMResult(generations=[Generation(text=text)])

    async def agenerate_text(
        self,
        prompt: PromptValue,
        n: int = 1,
        temperature: float = 1e-8,
        stop: t.Optional[t.List[str]] = None,
        callbacks: t.List = []
    ) -> LLMResult:
        """
        Asynchronous method to generate text. This should allow for async processing.
        """
        # Simulate an asynchronous call; here we directly call the sync method for now
        text = self._call(prompt)
        return LLMResult(generations=[Generation(text=text)])


custom_llm = CustomRagasLLM(api_key=None)
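
If I read the traceback right, the generations field of langchain's LLMResult expects a list of lists of Generation objects (one inner list per prompt), while my generate_text/agenerate_text return a flat list. A minimal sketch of what I mean (assuming langchain_core's LLMResult schema; the exact error wording depends on the pydantic version):

from langchain_core.outputs import Generation, LLMResult

# The flat shape returned above should reproduce a similar "not a valid list" error:
try:
    LLMResult(generations=[Generation(text="Processed: ...")])
except Exception as e:
    print(e)  # 1 validation error for LLMResult: generations -> 0

# generations is typed as List[List[Generation]], one inner list per prompt,
# so wrapping the Generation in an inner list validates:
result = LLMResult(generations=[[Generation(text="Processed: ...")]])
print(result.generations[0][0].text)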

This is the code from the first part of the script; maybe it helps for better understanding:

# Creating a dataset of str type
new_data_set = []
question = []
context = []
answer = []

# Extracting str data of 'question' and 'answer'
for item in llm:
    if item['role'] == 'user':
        for c in item['content']:
            question.append(c['text'])
    else:
        for c in item['content']:
            answer.append(c['text'])

# Iterate through each dictionary in your data
for item in ground_truth_data:
    # Check if 'content' key exists in the dictionary
    if 'content' in item:
        # Access the value of the 'content' key and append it to the context list
        context.append(item['content'])
    else:
        print(f"'content' key not found in item with id: {item.get('id')}")

# Check the length of context to see if anything was appended
print(f"Number of context entries extracted: {len(context)}")

# Initialize empty lists for dataset
new_ragas_dataset = {
    "question": [],
    "contexts": [],
    "answer": []
}

# Assuming question, context, and answer lists are already available
for i in range(len(question)):
    new_ragas_dataset['question'].append(question[i])
    
    # For now, we assign all the chunks (contexts) to each question
    new_ragas_dataset['contexts'].append(context)  # context is a list of chunks
    
    # Assign the corresponding answer
    new_ragas_dataset['answer'].append(answer[i])
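
After this, the dict gets wrapped into a Hugging Face Dataset and passed to evaluate together with the context_utilization metric and the custom LLM, roughly like this (a sketch; the exact arguments in my script may differ):

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import context_utilization

# Build a Dataset from the dict constructed above
ragas_dataset = Dataset.from_dict(new_ragas_dataset)

# context_utilization is the metric shown in the report above;
# custom_llm is the CustomRagasLLM instance defined earlier
results = evaluate(
    ragas_dataset,
    metrics=[context_utilization],
    llm=custom_llm,
)
print("RAGAS Evaluation Report:")
print(results)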

Labels: bug (Something isn't working), question (Further information is requested)
