diff --git a/scoreboard/README.md b/scoreboard/README.md
new file mode 100644
index 000000000..69cadd6c1
--- /dev/null
+++ b/scoreboard/README.md
@@ -0,0 +1,34 @@
+# Scoreboard
+
+HTML scoreboard generator for parallel programming tasks.
+
+## Usage
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+# Generate scoreboard
+python main.py -o output_directory
+```
+
+The script writes the scoreboard to `output_directory/index.html`.
+
+## Configuration
+
+- `data/threads-config.yml` - Task points, deadlines, and penalty settings
+- `data/plagiarism.yml` - Submissions flagged for plagiarism, per task type
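+
+For reference, `threads-config.yml` is expected to look roughly like the sample
+fixture in `tests/conftest.py` (an illustrative sketch, not the full schema):
+
+```yaml
+scoreboard:
+  task:
+    seq:
+      solution:
+        max: 4          # points for a working seq solution
+    omp:
+      solution:
+        max: 6
+  plagiarism:
+    coefficient: 0.5    # fraction of solution points deducted for flagged work
+  efficiency:
+    num_proc: 4         # processor count used for the efficiency column
+  deadlines:
+    seq: "2025-01-15"
+    omp: "2025-02-15"
+```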
+""" + +import pytest +import tempfile +import shutil +from pathlib import Path +import yaml +import csv + + +@pytest.fixture +def temp_dir(): + """Create a temporary directory for tests.""" + temp_path = Path(tempfile.mkdtemp()) + yield temp_path + shutil.rmtree(temp_path) + + +@pytest.fixture +def sample_config(): + """Sample configuration dictionary.""" + return { + "scoreboard": { + "task": { + "seq": {"solution": {"max": 4}}, + "omp": {"solution": {"max": 6}}, + "stl": {"solution": {"max": 8}}, + "tbb": {"solution": {"max": 6}}, + "all": {"solution": {"max": 10}}, + "mpi": {"solution": {"max": 0}}, + }, + "plagiarism": {"coefficient": 0.5}, + "efficiency": {"num_proc": 4}, + "deadlines": { + "seq": "2025-01-15", + "omp": "2025-02-15", + "stl": "2025-03-15", + "tbb": "2025-04-15", + "all": "2025-05-15", + "mpi": "2025-06-15", + }, + } + } + + +@pytest.fixture +def sample_plagiarism_config(): + """Sample plagiarism configuration dictionary.""" + return { + "plagiarism": { + "seq": ["broken_example"], + "omp": [], + "stl": ["cheater_task"], + "tbb": [], + "all": [], + "mpi": [], + } + } + + +@pytest.fixture +def sample_task_structure(temp_dir): + """Create a sample task directory structure.""" + tasks_dir = temp_dir / "tasks" + + # Create task directories + task_dirs = [ + "example_task/seq", + "example_task/omp", + "example_task/stl", + "disabled_task_disabled/seq", + "disabled_task_disabled/omp", + "partial_task/seq", + ] + + for task_dir in task_dirs: + (tasks_dir / task_dir).mkdir(parents=True) + # Create a dummy source file + (tasks_dir / task_dir / "main.cpp").touch() + + return tasks_dir + + +@pytest.fixture +def sample_performance_csv(temp_dir): + """Create a sample performance CSV file.""" + csv_file = temp_dir / "performance.csv" + + data = [ + { + "Task": "example_task", + "SEQ": "1.0", + "OMP": "0.5", + "STL": "0.3", + "TBB": "0.4", + "ALL": "0.2", + }, + { + "Task": "disabled_task", + "SEQ": "2.0", + "OMP": "1.0", + "STL": "0.8", + "TBB": "0.9", + "ALL": "0.7", + }, + { + "Task": "partial_task", + "SEQ": "1.5", + "OMP": "N/A", + "STL": "N/A", + "TBB": "N/A", + "ALL": "N/A", + }, + ] + + with open(csv_file, "w", newline="") as f: + writer = csv.DictWriter( + f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"] + ) + writer.writeheader() + writer.writerows(data) + + return csv_file + + +@pytest.fixture +def sample_config_files(temp_dir, sample_config, sample_plagiarism_config): + """Create sample configuration files.""" + data_dir = temp_dir / "data" + data_dir.mkdir() + + # Create threads-config.yml + config_file = data_dir / "threads-config.yml" + with open(config_file, "w") as f: + yaml.dump(sample_config, f) + + # Create plagiarism.yml + plagiarism_file = data_dir / "plagiarism.yml" + with open(plagiarism_file, "w") as f: + yaml.dump(sample_plagiarism_config, f) + + return data_dir diff --git a/scoreboard/tests/requirements.txt b/scoreboard/tests/requirements.txt new file mode 100644 index 000000000..b197d322c --- /dev/null +++ b/scoreboard/tests/requirements.txt @@ -0,0 +1 @@ +pytest>=7.0 diff --git a/scoreboard/tests/test_calculate_performance_metrics.py b/scoreboard/tests/test_calculate_performance_metrics.py new file mode 100644 index 000000000..4ed144b4e --- /dev/null +++ b/scoreboard/tests/test_calculate_performance_metrics.py @@ -0,0 +1,95 @@ +from main import calculate_performance_metrics + + +class TestCalculatePerformanceMetrics: + def test_calculate_performance_metrics_valid_values(self): + acceleration, efficiency = calculate_performance_metrics("0.5", 4) 
+        assert acceleration == "2.00"
+        assert efficiency == "50.00%"
+
+        acceleration, efficiency = calculate_performance_metrics("0.25", 4)
+        assert acceleration == "4.00"
+        assert efficiency == "100.00%"
+
+        acceleration, efficiency = calculate_performance_metrics("0.5", 2)
+        assert acceleration == "2.00"
+        assert efficiency == "100.00%"
+
+    def test_calculate_performance_metrics_edge_cases(self):
+        acceleration, efficiency = calculate_performance_metrics("0.1", 4)
+        assert acceleration == "10.00"
+        assert efficiency == "250.00%"
+
+        acceleration, efficiency = calculate_performance_metrics("1.0", 4)
+        assert acceleration == "1.00"
+        assert efficiency == "25.00%"
+
+        acceleration, efficiency = calculate_performance_metrics("2.0", 4)
+        assert acceleration == "0.50"
+        assert efficiency == "12.50%"
+
+    def test_calculate_performance_metrics_invalid_values(self):
+        acceleration, efficiency = calculate_performance_metrics("0.0", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("-1.0", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("invalid", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("inf", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("nan", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+    def test_calculate_performance_metrics_special_strings(self):
+        acceleration, efficiency = calculate_performance_metrics("?", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics("N/A", 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+        acceleration, efficiency = calculate_performance_metrics(None, 4)
+        assert acceleration == "?"
+        assert efficiency == "?"
+
+    def test_calculate_performance_metrics_different_proc_counts(self):
+        perf_val = "0.25"
+
+        acceleration, efficiency = calculate_performance_metrics(perf_val, 1)
+        assert acceleration == "4.00"
+        assert efficiency == "400.00%"
+
+        acceleration, efficiency = calculate_performance_metrics(perf_val, 2)
+        assert acceleration == "4.00"
+        assert efficiency == "200.00%"
+
+        acceleration, efficiency = calculate_performance_metrics(perf_val, 8)
+        assert acceleration == "4.00"
+        assert efficiency == "50.00%"
+
+        acceleration, efficiency = calculate_performance_metrics(perf_val, 16)
+        assert acceleration == "4.00"
+        assert efficiency == "25.00%"
+
+    def test_calculate_performance_metrics_precision(self):
+        acceleration, efficiency = calculate_performance_metrics("0.3", 3)
+        assert acceleration == "3.33"
+        assert efficiency == "111.11%"
+
+        acceleration, efficiency = calculate_performance_metrics("0.7", 6)
+        assert acceleration == "1.43"
+        assert efficiency == "23.81%"
diff --git a/scoreboard/tests/test_check_plagiarism.py b/scoreboard/tests/test_check_plagiarism.py
new file mode 100644
index 000000000..763656aad
--- /dev/null
+++ b/scoreboard/tests/test_check_plagiarism.py
@@ -0,0 +1,81 @@
+from main import check_plagiarism_and_calculate_penalty
+
+
+class TestCheckPlagiarismAndCalculatePenalty:
+    def test_check_plagiarism_flagged_task(
+        self, sample_config, sample_plagiarism_config
+    ):
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "broken_example", "seq", 4, sample_plagiarism_config, sample_config
+        )
+        assert is_cheated
+        assert plagiarism_points == -2
+
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "cheater_task", "stl", 8, sample_plagiarism_config, sample_config
+        )
+        assert is_cheated
+        assert plagiarism_points == -4
+
+    def test_check_plagiarism_clean_task(self, sample_config, sample_plagiarism_config):
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "clean_task", "seq", 4, sample_plagiarism_config, sample_config
+        )
+        assert not is_cheated
+        assert plagiarism_points == 0
+
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "another_task", "omp", 6, sample_plagiarism_config, sample_config
+        )
+        assert not is_cheated
+        assert plagiarism_points == 0
+
+    def test_check_plagiarism_disabled_task_suffix(
+        self, sample_config, sample_plagiarism_config
+    ):
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "broken_example_disabled", "seq", 4, sample_plagiarism_config, sample_config
+        )
+        assert is_cheated
+        assert plagiarism_points == -2
+
+    def test_check_plagiarism_different_task_types(
+        self, sample_config, sample_plagiarism_config
+    ):
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "broken_example", "omp", 6, sample_plagiarism_config, sample_config
+        )
+        assert not is_cheated
+        assert plagiarism_points == 0
+
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "cheater_task", "seq", 4, sample_plagiarism_config, sample_config
+        )
+        assert not is_cheated
+        assert plagiarism_points == 0
+
+    def test_check_plagiarism_zero_points(
+        self, sample_config, sample_plagiarism_config
+    ):
+        is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+            "broken_example", "seq", 0, sample_plagiarism_config, sample_config
+        )
+        assert is_cheated
+        assert plagiarism_points == 0
+
+    def test_check_plagiarism_different_coefficients(self, sample_plagiarism_config):
+        config_75_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.75}}}
{"coefficient": 0.75}}} + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 4, sample_plagiarism_config, config_75_percent + ) + assert is_cheated + assert plagiarism_points == -3 + + config_25_percent = {"scoreboard": {"plagiarism": {"coefficient": 0.25}}} + + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + "broken_example", "seq", 8, sample_plagiarism_config, config_25_percent + ) + assert is_cheated + assert plagiarism_points == -2 diff --git a/scoreboard/tests/test_discover_tasks.py b/scoreboard/tests/test_discover_tasks.py new file mode 100644 index 000000000..d938257d7 --- /dev/null +++ b/scoreboard/tests/test_discover_tasks.py @@ -0,0 +1,110 @@ +""" +Tests for the discover_tasks function. +""" + +from main import discover_tasks + + +class TestDiscoverTasks: + """Test cases for discover_tasks function.""" + + def test_discover_tasks_with_valid_structure(self, sample_task_structure): + """Test discovering tasks with a valid directory structure.""" + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(sample_task_structure, task_types) + + # Check that tasks are discovered correctly + assert "example_task" in result + assert "disabled_task" in result + assert "partial_task" in result + + # Check task statuses + assert result["example_task"]["seq"] == "done" + assert result["example_task"]["omp"] == "done" + assert result["example_task"]["stl"] == "done" + + assert result["disabled_task"]["seq"] == "disabled" + assert result["disabled_task"]["omp"] == "disabled" + + assert result["partial_task"]["seq"] == "done" + assert "omp" not in result["partial_task"] # No omp implementation + + def test_discover_tasks_empty_directory(self, temp_dir): + """Test discovering tasks in an empty directory.""" + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(temp_dir / "nonexistent", task_types) + + assert result == {} + + def test_discover_tasks_no_task_directories(self, temp_dir): + """Test discovering tasks when no valid task directories exist.""" + tasks_dir = temp_dir / "tasks" + tasks_dir.mkdir() + + # Create common directory (should be ignored) + (tasks_dir / "common").mkdir() + (tasks_dir / "common" / "utils.hpp").touch() + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + assert result == {} + + def test_discover_tasks_with_mixed_implementations(self, temp_dir): + """Test discovering tasks with mixed implementation availability.""" + tasks_dir = temp_dir / "tasks" + + # Create task with only some implementations + task_dir = tasks_dir / "mixed_task" + (task_dir / "seq").mkdir(parents=True) + (task_dir / "omp").mkdir(parents=True) + # No stl, tbb, all, mpi implementations + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + assert "mixed_task" in result + assert result["mixed_task"]["seq"] == "done" + assert result["mixed_task"]["omp"] == "done" + assert "stl" not in result["mixed_task"] + assert "tbb" not in result["mixed_task"] + assert "all" not in result["mixed_task"] + assert "mpi" not in result["mixed_task"] + + def test_discover_tasks_disabled_suffix_handling(self, temp_dir): + """Test correct handling of _disabled suffix in task names.""" + tasks_dir = temp_dir / "tasks" + + # Create disabled task + disabled_dir = tasks_dir / "test_task_disabled" + (disabled_dir / "seq").mkdir(parents=True) + (disabled_dir / 
"omp").mkdir(parents=True) + + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + + result = discover_tasks(tasks_dir, task_types) + + # Should be indexed under clean name without _disabled + assert "test_task" in result + assert "test_task_disabled" not in result + + # Should be marked as disabled + assert result["test_task"]["seq"] == "disabled" + assert result["test_task"]["omp"] == "disabled" + + def test_discover_tasks_custom_task_types(self, sample_task_structure): + """Test discovering tasks with custom task types list.""" + # Only look for seq and omp + task_types = ["seq", "omp"] + + result = discover_tasks(sample_task_structure, task_types) + + assert "example_task" in result + assert result["example_task"]["seq"] == "done" + assert result["example_task"]["omp"] == "done" + # stl should not be included even though directory exists + assert "stl" not in result["example_task"] diff --git a/scoreboard/tests/test_get_solution_points_and_style.py b/scoreboard/tests/test_get_solution_points_and_style.py new file mode 100644 index 000000000..e92ac9cf6 --- /dev/null +++ b/scoreboard/tests/test_get_solution_points_and_style.py @@ -0,0 +1,90 @@ +import pytest +from main import get_solution_points_and_style + + +class TestGetSolutionPointsAndStyle: + def test_get_solution_points_done_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", "done", sample_config + ) + assert sol_points == 4 + assert solution_style == "background-color: lightgreen;" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "done", sample_config + ) + assert sol_points == 6 + assert solution_style == "background-color: lightgreen;" + + sol_points, solution_style = get_solution_points_and_style( + "all", "done", sample_config + ) + assert sol_points == 10 + assert solution_style == "background-color: lightgreen;" + + def test_get_solution_points_disabled_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", "disabled", sample_config + ) + assert sol_points == 4 + assert solution_style == "background-color: #6495ED;" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "disabled", sample_config + ) + assert sol_points == 6 + assert solution_style == "background-color: #6495ED;" + + sol_points, solution_style = get_solution_points_and_style( + "all", "disabled", sample_config + ) + assert sol_points == 10 + assert solution_style == "background-color: #6495ED;" + + def test_get_solution_points_missing_status(self, sample_config): + sol_points, solution_style = get_solution_points_and_style( + "seq", None, sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + sol_points, solution_style = get_solution_points_and_style( + "omp", "missing", sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + sol_points, solution_style = get_solution_points_and_style( + "all", "", sample_config + ) + assert sol_points == 0 + assert solution_style == "" + + def test_get_solution_points_all_task_types(self, sample_config): + task_types = ["seq", "omp", "stl", "tbb", "all", "mpi"] + expected_points = [4, 6, 8, 6, 10, 0] + + for task_type, expected in zip(task_types, expected_points): + sol_points, solution_style = get_solution_points_and_style( + task_type, "done", sample_config + ) + assert sol_points == expected + assert solution_style == "background-color: lightgreen;" + + def test_get_solution_points_invalid_task_type(self, sample_config): + with 
+            get_solution_points_and_style("invalid_type", "done", sample_config)
+
+    def test_get_solution_points_malformed_config(self):
+        malformed_config = {
+            "scoreboard": {"task": {"seq": {"solution": {"max": "invalid"}}}}
+        }
+
+        with pytest.raises((ValueError, TypeError)):
+            get_solution_points_and_style("seq", "done", malformed_config)
+
+    def test_get_solution_points_missing_config_keys(self):
+        incomplete_config = {"scoreboard": {}}
+
+        with pytest.raises(KeyError):
+            get_solution_points_and_style("seq", "done", incomplete_config)
diff --git a/scoreboard/tests/test_load_performance_data.py b/scoreboard/tests/test_load_performance_data.py
new file mode 100644
index 000000000..45c657a36
--- /dev/null
+++ b/scoreboard/tests/test_load_performance_data.py
@@ -0,0 +1,144 @@
+"""
+Tests for the load_performance_data function.
+"""
+
+import csv
+from main import load_performance_data
+
+
+class TestLoadPerformanceData:
+    """Test cases for load_performance_data function."""
+
+    def test_load_performance_data_valid_csv(self, sample_performance_csv):
+        """Test loading performance data from a valid CSV file."""
+        result = load_performance_data(sample_performance_csv)
+
+        # Check structure
+        assert isinstance(result, dict)
+        assert len(result) == 3
+
+        # Check example_task data
+        assert "example_task" in result
+        example_data = result["example_task"]
+        assert example_data["seq"] == "1.0"
+        assert example_data["omp"] == "0.5"
+        assert example_data["stl"] == "0.3"
+        assert example_data["tbb"] == "0.4"
+        assert example_data["all"] == "0.2"
+        assert example_data["mpi"] == "N/A"
+
+        # Check disabled_task data
+        assert "disabled_task" in result
+        disabled_data = result["disabled_task"]
+        assert disabled_data["seq"] == "2.0"
+        assert disabled_data["omp"] == "1.0"
+
+        # Check partial_task data
+        assert "partial_task" in result
+        partial_data = result["partial_task"]
+        assert partial_data["seq"] == "1.5"
+        assert partial_data["omp"] == "N/A"
+        assert partial_data["mpi"] == "N/A"
+
+    def test_load_performance_data_nonexistent_file(self, temp_dir):
+        """Test loading performance data when the file doesn't exist."""
+        nonexistent_file = temp_dir / "nonexistent.csv"
+
+        result = load_performance_data(nonexistent_file)
+
+        assert result == {}
+
+    def test_load_performance_data_empty_csv(self, temp_dir):
+        """Test loading performance data from an empty CSV file."""
+        empty_csv = temp_dir / "empty.csv"
+        empty_csv.touch()
+
+        result = load_performance_data(empty_csv)
+
+        assert result == {}
+
+    def test_load_performance_data_header_only_csv(self, temp_dir):
+        """Test loading performance data from a CSV with only headers."""
+        header_only_csv = temp_dir / "header_only.csv"
+
+        with open(header_only_csv, "w", newline="") as f:
+            writer = csv.DictWriter(
+                f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"]
+            )
+            writer.writeheader()
+
+        result = load_performance_data(header_only_csv)
+
+        assert result == {}
+
+    def test_load_performance_data_malformed_csv(self, temp_dir):
+        """Test loading performance data from a malformed CSV."""
+        malformed_csv = temp_dir / "malformed.csv"
+
+        with open(malformed_csv, "w") as f:
+            f.write("Task,SEQ,OMP\n")
+            f.write("test_task,1.0\n")  # Missing OMP value
+            f.write("another_task,invalid,0.5\n")  # Invalid SEQ value
+
+        # Should not crash, but may have incomplete data
+        result = load_performance_data(malformed_csv)
+
+        # Function should handle this gracefully
+        assert isinstance(result, dict)
+
+    def test_load_performance_data_missing_columns(self, temp_dir):
+        """Test loading performance data when some columns are missing."""
+        partial_csv = temp_dir / "partial.csv"
+
+        data = [
+            {"Task": "test_task", "SEQ": "1.0", "OMP": "0.5"}
+            # Missing STL, TBB, ALL columns
+        ]
+
+        with open(partial_csv, "w", newline="") as f:
+            writer = csv.DictWriter(f, fieldnames=["Task", "SEQ", "OMP"])
+            writer.writeheader()
+            writer.writerows(data)
+
+        # Should handle missing columns gracefully
+        result = load_performance_data(partial_csv)
+
+        assert "test_task" in result
+        # Columns absent from the CSV fall back to the "?" placeholder
+        task_data = result["test_task"]
+        assert task_data["seq"] == "1.0"
+        assert task_data["omp"] == "0.5"
+        assert task_data["mpi"] == "N/A"  # This should always be set
+
+    def test_load_performance_data_special_values(self, temp_dir):
+        """Test loading performance data with special values."""
+        special_csv = temp_dir / "special.csv"
+
+        data = [
+            {
+                "Task": "special_task",
+                "SEQ": "0.0",
+                "OMP": "inf",
+                "STL": "-1",
+                "TBB": "",
+                "ALL": "N/A",
+            }
+        ]
+
+        with open(special_csv, "w", newline="") as f:
+            writer = csv.DictWriter(
+                f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"]
+            )
+            writer.writeheader()
+            writer.writerows(data)
+
+        result = load_performance_data(special_csv)
+
+        assert "special_task" in result
+        task_data = result["special_task"]
+        assert task_data["seq"] == "0.0"
+        assert task_data["omp"] == "inf"
+        assert task_data["stl"] == "-1"
+        assert task_data["tbb"] == ""
+        assert task_data["all"] == "N/A"
+        assert task_data["mpi"] == "N/A"