Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ dependencies = [
"pydantic",
"numpy>=2", # pinned to avoid incompatibilities
"hf-xet>=1.1.8", # pinned to avoid failing test suite
# Prettiness
"scipy>=1.7.0", # for sparse matrix handling specific to scicode benchmark
"typer>=0.20.0",
"termcolor==2.3.0",
"pytablewriter",
Expand All @@ -90,6 +90,7 @@ dependencies = [
"httpx>=0.27.2",
"latex2sympy2_extended==1.0.6",
"langcodes",
"h5py", # for handling h5 files e.g. scicode benchmark
]

[project.optional-dependencies]
Expand Down
10 changes: 10 additions & 0 deletions src/lighteval/tasks/tasks/scicode/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
"""SciCode benchmark implementation for Lighteval.

Based on the original SciCode implementation:
https://github.com/scicode-bench/SciCode/blob/main/eval/inspect_ai/scicode.py
"""

from lighteval.tasks.tasks.scicode.main import TASKS_TABLE, scicode


__all__ = ["scicode", "TASKS_TABLE"]
106 changes: 106 additions & 0 deletions src/lighteval/tasks/tasks/scicode/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
"""
name:
SciCode

dataset:
SciCode1/SciCode

abstract:
SciCode is a challenging benchmark designed to evaluate the capabilities of language models (LMs)
in generating code for solving realistic scientific research problems. It has a diverse coverage of
16 subdomains from 6 domains: Physics, Math, Material Science, Biology, and Chemistry. Unlike previous
benchmarks that consist of exam-like question-answer pairs, SciCode is converted from real research problems.
SciCode problems naturally factorize into multiple subproblems, each involving knowledge recall, reasoning,
and code synthesis. In total, SciCode contains 338 subproblems decomposed from 80 challenging main problems.

languages:
english

tags:
code-generation, scientific-computing

paper:
https://arxiv.org/abs/2407.13168

starred:
true
"""

from typing import Any

from inspect_ai.dataset import Sample

from lighteval.tasks.lighteval_task import LightevalTaskConfig
from lighteval.tasks.requests import Doc
from lighteval.tasks.tasks.scicode.prompts import prepare_scicode_prompt
from lighteval.tasks.tasks.scicode.scorer import scicode_scorer
from lighteval.tasks.tasks.scicode.solver import scicode_solver
from lighteval.tasks.tasks.scicode.utils import _extract_first_step_metadata


def scicode_prompt(line: dict[str, Any], task_name: str = "scicode") -> Doc:
    """Convert a dataset record into a Doc for evaluation.

    Only the first sub-step's prompt is materialized here; the solver
    handles subsequent steps of the multi-step protocol.
    """
    first_step = _extract_first_step_metadata(line)
    prompt_text = prepare_scicode_prompt(first_step["step_data"], line, with_background=False)

    # Per-step details the scorer needs, carried alongside the prompt.
    step_details = {
        "test_cases": first_step["test_cases"],
        "function_header": first_step["function_header"],
        "fn_name": first_step["fn_name"],
        "step_number": first_step["step_number"],
        "problem_id": line.get("problem_id"),
        "required_dependencies": line.get("required_dependencies", ""),
    }

    return Doc(
        task_name=task_name,
        query=prompt_text,
        choices=[""],
        gold_index=0,
        specific=step_details,
    )


def record_to_sample(record: dict[str, Any]) -> Sample:
    """Convert a dataset record into an inspect-ai Sample object.

    The full record (including ALL sub_steps) travels along in metadata,
    augmented with the first step's test cases and function identifiers,
    so the solver can drive multi-step processing.
    """
    first_step = _extract_first_step_metadata(record)

    sample_metadata = {
        **record,
        "test_cases": first_step["test_cases"],
        "function_header": first_step["function_header"],
        "fn_name": first_step["fn_name"],
        "step_number": first_step["step_number"],
    }

    return Sample(
        input=prepare_scicode_prompt(first_step["step_data"], record, with_background=False),
        metadata=sample_metadata,
    )


# Task configuration wiring the Doc-based prompt (lighteval pipeline) and the
# inspect_ai path (sample conversion, multi-step solver, scorer) together.
scicode = LightevalTaskConfig(
    name="scicode",
    prompt_function=scicode_prompt,
    sample_fields=record_to_sample,
    # Solver iterates over sub-steps; background text is disabled here.
    solver=scicode_solver(with_background=False),
    scorer=scicode_scorer(),
    # Dataset: https://huggingface.co/datasets/SciCode1/SciCode
    hf_repo="SciCode1/SciCode",
    hf_subset="default",
    hf_avail_splits=["test", "validation"],
    evaluation_splits=["test"],
    # Generous budget: the model must emit complete function bodies per sub-step.
    generation_size=32768,
    metrics=[],  # Metrics are defined in the scorer decorator for inspect_ai
    stop_sequence=[],  # no stop sequence, will use EOS token
    version=0,
)

# Exported task table consumed by lighteval's task registry.
TASKS_TABLE = [scicode]
133 changes: 133 additions & 0 deletions src/lighteval/tasks/tasks/scicode/parse.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
"""Parsing utilities for SciCode.

Based on original implementation:
https://github.com/scicode-bench/SciCode
"""

import ast
import re
from pathlib import Path

import h5py
import scipy.sparse


def extract_function_name(function_header: str) -> str:
    """Extract the function or class name from a definition header.

    Tries a ``def`` header first, then a ``class`` header.

    Raises:
        ValueError: if neither pattern matches.
    """
    for name_pattern in (r"\bdef\s+(\w+)\s*\(", r"\bclass\s+(\w+)\s*[\(:]"):
        found = re.search(name_pattern, function_header)
        if found:
            return found.group(1)

    raise ValueError(f"Function name or class name not found in: {function_header}")


def get_function_from_code(code_string: str, function_name: str) -> str:
"""Extract specific function/class from code using AST."""
if code_string is None:
return ""
try:
tree = ast.parse(code_string)
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.ClassDef)) and node.name == function_name:
return ast.unparse(node)
except Exception:
return code_string
return None


def _process_hdf5_sparse_matrix(group: h5py.Group):
    """Reconstruct a scipy sparse matrix stored inside an h5py Group.

    The stored layout selects the format: ``row``/``col`` datasets mean COO,
    a ``blocksize`` dataset means BSR, anything else is assumed to be CSR.
    """
    values = group["data"][()]
    mat_shape = tuple(group["shape"][()])

    if "row" in group and "col" in group:
        coords = (group["row"][()], group["col"][()])
        return scipy.sparse.coo_matrix((values, coords), shape=mat_shape)

    indices = group["indices"][()]
    indptr = group["indptr"][()]
    if "blocksize" in group:
        block = tuple(group["blocksize"][()])
        return scipy.sparse.bsr_matrix((values, indices, indptr), shape=mat_shape, blocksize=block)
    return scipy.sparse.csr_matrix((values, indices, indptr), shape=mat_shape)


def _process_hdf5_list(group: h5py.Group) -> list:
    """Collect every dataset stored under *group* into a plain Python list."""
    return [group[member][()] for member in group.keys()]


def _process_hdf5_dict(group: h5py.Group) -> dict:
    """Convert an h5py Group into a plain dict, recursing into subgroups."""
    decoded: dict = {}
    for name, member in group.items():
        if isinstance(member, h5py.Group):
            # Subgroups tagged "sparse_matrix" decode to a scipy sparse matrix;
            # any other subgroup goes through the generic group handler.
            if "sparse_matrix" in member:
                decoded[name] = _process_hdf5_sparse_matrix(member["sparse_matrix"])
            else:
                decoded[name] = _process_hdf5_datagroup(member)
        elif isinstance(member, h5py.Dataset):
            value = member[()]
            if isinstance(value, bytes):
                decoded[name] = value.decode("utf-8", errors="strict")
            else:
                # Numeric-looking key names were originally float keys; restore them.
                try:
                    decoded[float(name)] = value
                except ValueError:
                    decoded[name] = value
    return decoded


def _process_hdf5_datagroup(group: h5py.Group):
    """Dispatch an h5py Group to the matching decoder based on its tag key.

    A ``list`` child decodes to a list, a ``sparse_matrix`` child to a scipy
    sparse matrix; everything else is treated as a generic dict group.
    """
    if "list" in group:
        return _process_hdf5_list(group["list"])
    if "sparse_matrix" in group:
        return _process_hdf5_sparse_matrix(group["sparse_matrix"])
    return _process_hdf5_dict(group)


def extract_targets(step_id: str | tuple, num_tests: int, h5py_file: str | Path) -> tuple:
    """Extract target values from an h5py file for a given step.

    Args:
        step_id: Step identifier; tuples are joined with ``.`` and any other
            non-string is coerced via ``str()`` (the annotation previously
            claimed ``str`` only, contradicting the tuple handling below).
        num_tests: Number of test groups to look up (``test1`` .. ``testN``).
        h5py_file: Path to the HDF5 file holding the targets.

    Returns:
        A tuple of decoded targets, one per test group that exists and
        contains a ``var1`` entry; missing test groups are skipped silently.

    Raises:
        ValueError: if ``step_id`` has no group in the file.
    """
    # Normalize the step id into the string key used inside the HDF5 file.
    if isinstance(step_id, tuple):
        step_id = ".".join(str(x) for x in step_id)
    elif not isinstance(step_id, str):
        step_id = str(step_id)

    with h5py.File(h5py_file, "r") as f:
        if step_id not in f:
            raise ValueError(f"Step {step_id} not found in h5py file")
        targets = []
        for i in range(1, num_tests + 1):
            group_path = f"{step_id}/test{i}"

            # Missing test groups are tolerated rather than treated as errors.
            if group_path not in f:
                continue

            group = f[group_path]

            # Targets are stored under "var1": either a raw Dataset or a
            # structured Group (list / sparse_matrix / dict).
            # NOTE: the original wrapped this in `try: ... except Exception:
            # raise`, a no-op handler that only obscured control flow; removed.
            if "var1" in group:
                var1 = group["var1"]
                if isinstance(var1, h5py.Dataset):
                    targets.append(var1[()])
                elif isinstance(var1, h5py.Group):
                    targets.append(_process_hdf5_datagroup(var1))

        return tuple(targets)
Loading