From db228c2652af72e9713c6ce8bf6b2b29bb8f3a40 Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Wed, 26 Jun 2024 14:21:06 -0400 Subject: [PATCH] add cpu support for MMLU bench simple_evaluate has a device option that seems to default to cuda. People with 128GB or even ~90GB+ of RAM should be able to run eval on CPU. Add auto-detection of whether CUDA is available. Signed-off-by: Charlie Doern --- src/instructlab/eval/mmlu.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/instructlab/eval/mmlu.py b/src/instructlab/eval/mmlu.py index 5589abb4..8ae4eb73 100644 --- a/src/instructlab/eval/mmlu.py +++ b/src/instructlab/eval/mmlu.py @@ -6,6 +6,7 @@ # Third Party from lm_eval.evaluator import simple_evaluate # type: ignore from lm_eval.tasks import TaskManager # type: ignore +import torch # First Party from instructlab.eval.evaluator import Evaluator @@ -58,6 +59,7 @@ def run(self) -> tuple: tasks=self.tasks, num_fewshot=self.few_shots, batch_size=self.batch_size, + device=("cuda" if torch.cuda.is_available() else "cpu"), ) results = mmlu_output["results"]