34 changes: 34 additions & 0 deletions scoreboard/README.md
@@ -0,0 +1,34 @@
# Scoreboard

HTML scoreboard generator for parallel programming tasks.

## Usage

```bash
# Install dependencies
pip install -r requirements.txt

# Generate scoreboard
python main.py -o output_directory
```

Generates `output_directory/index.html` with the scoreboard.

## Configuration

- `data/threads-config.yml` - Task points, deadlines, penalties
- `data/plagiarism.yml` - Flagged submissions
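
For orientation, a minimal sketch of reading these files. The key names mirror the sample fixtures in `tests/conftest.py`; the real configuration files may carry additional fields.

```python
# Minimal sketch only: key names follow the sample fixtures in
# tests/conftest.py; the real configs may contain more fields.
import yaml

with open("data/threads-config.yml") as f:
    cfg = yaml.safe_load(f)

max_seq_points = cfg["scoreboard"]["task"]["seq"]["solution"]["max"]
seq_deadline = cfg["scoreboard"]["deadlines"]["seq"]
plagiarism_coeff = cfg["scoreboard"]["plagiarism"]["coefficient"]
num_proc = cfg["scoreboard"]["efficiency"]["num_proc"]

with open("data/plagiarism.yml") as f:
    plagiarism_cfg = yaml.safe_load(f)

# One list of flagged submission directories per task type.
flagged_seq_tasks = plagiarism_cfg["plagiarism"]["seq"]
```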

## Testing

```bash
# Install test dependencies
pip install -r tests/requirements.txt

# Run tests
python -m pytest tests/ -v
```

## Output

HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), P (plagiarism), Total.
25 changes: 15 additions & 10 deletions scoreboard/main.py
@@ -48,14 +48,16 @@ def load_performance_data(perf_stat_file_path):
         with open(perf_stat_file_path, "r", newline="") as csvfile:
             reader = csv.DictReader(csvfile)
             for row in reader:
-                perf_stats[row["Task"]] = {
-                    "seq": row["SEQ"],
-                    "omp": row["OMP"],
-                    "tbb": row["TBB"],
-                    "stl": row["STL"],
-                    "all": row["ALL"],
-                    "mpi": "N/A",
-                }
+                task_name = row.get("Task")
+                if task_name:
+                    perf_stats[task_name] = {
+                        "seq": row.get("SEQ", "?"),
+                        "omp": row.get("OMP", "?"),
+                        "tbb": row.get("TBB", "?"),
+                        "stl": row.get("STL", "?"),
+                        "all": row.get("ALL", "?"),
+                        "mpi": "N/A",
+                    }
     else:
         logger.warning("Performance stats CSV not found at %s", perf_stat_file_path)
     return perf_stats
@@ -67,7 +69,9 @@ def calculate_performance_metrics(perf_val, eff_num_proc):
     efficiency = "?"
     try:
         perf_float = float(perf_val)
-        if perf_float > 0:
+        if perf_float > 0 and not (
+            perf_float == float("inf") or perf_float != perf_float
+        ):
             speedup = 1.0 / perf_float
             acceleration = f"{speedup:.2f}"
             efficiency = f"{speedup / eff_num_proc * 100:.2f}%"
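
The guard above rejects non-positive, infinite, and NaN timings before computing speedup (1 / perf) and efficiency (speedup / num_proc). A quick sketch of the resulting behavior; the expected values mirror the assertions added in `tests/test_calculate_performance_metrics.py`.

```python
# Sketch only: expected values taken from tests/test_calculate_performance_metrics.py.
from main import calculate_performance_metrics

acceleration, efficiency = calculate_performance_metrics("0.25", 4)
assert (acceleration, efficiency) == ("4.00", "100.00%")  # 1 / 0.25 = 4.00x; 4.00 / 4 procs = 100%

acceleration, efficiency = calculate_performance_metrics("inf", 4)
assert (acceleration, efficiency) == ("?", "?")  # caught by the new infinity check

acceleration, efficiency = calculate_performance_metrics("nan", 4)
assert (acceleration, efficiency) == ("?", "?")  # NaN never compares greater than zero, so it is rejected
```
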
@@ -92,9 +96,10 @@ def check_plagiarism_and_calculate_penalty(
     dir, task_type, sol_points, plagiarism_cfg, cfg
 ):
     """Check if task is plagiarized and calculate penalty points."""
+    clean_dir = dir[: -len("_disabled")] if dir.endswith("_disabled") else dir
     is_cheated = (
         dir in plagiarism_cfg["plagiarism"][task_type]
-        or dir.rstrip("_disabled") in plagiarism_cfg["plagiarism"][task_type]
+        or clean_dir in plagiarism_cfg["plagiarism"][task_type]
     )
     plagiarism_points = 0
     if is_cheated:
144 changes: 144 additions & 0 deletions scoreboard/tests/conftest.py
@@ -0,0 +1,144 @@
"""
Pytest configuration and shared fixtures for scoreboard tests.
"""

import pytest
import tempfile
import shutil
from pathlib import Path
import yaml
import csv


@pytest.fixture
def temp_dir():
"""Create a temporary directory for tests."""
temp_path = Path(tempfile.mkdtemp())
yield temp_path
shutil.rmtree(temp_path)


@pytest.fixture
def sample_config():
"""Sample configuration dictionary."""
return {
"scoreboard": {
"task": {
"seq": {"solution": {"max": 4}},
"omp": {"solution": {"max": 6}},
"stl": {"solution": {"max": 8}},
"tbb": {"solution": {"max": 6}},
"all": {"solution": {"max": 10}},
"mpi": {"solution": {"max": 0}},
},
"plagiarism": {"coefficient": 0.5},
"efficiency": {"num_proc": 4},
"deadlines": {
"seq": "2025-01-15",
"omp": "2025-02-15",
"stl": "2025-03-15",
"tbb": "2025-04-15",
"all": "2025-05-15",
"mpi": "2025-06-15",
},
}
}


@pytest.fixture
def sample_plagiarism_config():
"""Sample plagiarism configuration dictionary."""
return {
"plagiarism": {
"seq": ["broken_example"],
"omp": [],
"stl": ["cheater_task"],
"tbb": [],
"all": [],
"mpi": [],
}
}


@pytest.fixture
def sample_task_structure(temp_dir):
"""Create a sample task directory structure."""
tasks_dir = temp_dir / "tasks"

# Create task directories
task_dirs = [
"example_task/seq",
"example_task/omp",
"example_task/stl",
"disabled_task_disabled/seq",
"disabled_task_disabled/omp",
"partial_task/seq",
]

for task_dir in task_dirs:
(tasks_dir / task_dir).mkdir(parents=True)
# Create a dummy source file
(tasks_dir / task_dir / "main.cpp").touch()

return tasks_dir


@pytest.fixture
def sample_performance_csv(temp_dir):
"""Create a sample performance CSV file."""
csv_file = temp_dir / "performance.csv"

data = [
{
"Task": "example_task",
"SEQ": "1.0",
"OMP": "0.5",
"STL": "0.3",
"TBB": "0.4",
"ALL": "0.2",
},
{
"Task": "disabled_task",
"SEQ": "2.0",
"OMP": "1.0",
"STL": "0.8",
"TBB": "0.9",
"ALL": "0.7",
},
{
"Task": "partial_task",
"SEQ": "1.5",
"OMP": "N/A",
"STL": "N/A",
"TBB": "N/A",
"ALL": "N/A",
},
]

with open(csv_file, "w", newline="") as f:
writer = csv.DictWriter(
f, fieldnames=["Task", "SEQ", "OMP", "STL", "TBB", "ALL"]
)
writer.writeheader()
writer.writerows(data)

return csv_file


@pytest.fixture
def sample_config_files(temp_dir, sample_config, sample_plagiarism_config):
"""Create sample configuration files."""
data_dir = temp_dir / "data"
data_dir.mkdir()

# Create threads-config.yml
config_file = data_dir / "threads-config.yml"
with open(config_file, "w") as f:
yaml.dump(sample_config, f)

# Create plagiarism.yml
plagiarism_file = data_dir / "plagiarism.yml"
with open(plagiarism_file, "w") as f:
yaml.dump(sample_plagiarism_config, f)

return data_dir
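
The committed tests below exercise only `calculate_performance_metrics`; purely as an illustration, a hypothetical test (not part of this PR) could combine the `sample_performance_csv` fixture above with `load_performance_data` from `scoreboard/main.py`:

```python
# Hypothetical usage sketch, not part of this PR: feeds the sample CSV fixture
# through load_performance_data and checks the per-task dictionaries it builds.
from main import load_performance_data


def test_load_performance_data_reads_sample_csv(sample_performance_csv):
    stats = load_performance_data(str(sample_performance_csv))

    # Each CSV row becomes one entry keyed by task name.
    assert set(stats) == {"example_task", "disabled_task", "partial_task"}

    # Column values are kept as strings; MPI is always reported as "N/A".
    assert stats["example_task"]["seq"] == "1.0"
    assert stats["example_task"]["mpi"] == "N/A"
    assert stats["partial_task"]["omp"] == "N/A"
```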
1 change: 1 addition & 0 deletions scoreboard/tests/requirements.txt
@@ -0,0 +1 @@
pytest>=7.0
95 changes: 95 additions & 0 deletions scoreboard/tests/test_calculate_performance_metrics.py
@@ -0,0 +1,95 @@
from main import calculate_performance_metrics


class TestCalculatePerformanceMetrics:
    def test_calculate_performance_metrics_valid_values(self):
        acceleration, efficiency = calculate_performance_metrics("0.5", 4)
        assert acceleration == "2.00"
        assert efficiency == "50.00%"

        acceleration, efficiency = calculate_performance_metrics("0.25", 4)
        assert acceleration == "4.00"
        assert efficiency == "100.00%"

        acceleration, efficiency = calculate_performance_metrics("0.5", 2)
        assert acceleration == "2.00"
        assert efficiency == "100.00%"

    def test_calculate_performance_metrics_edge_cases(self):
        acceleration, efficiency = calculate_performance_metrics("0.1", 4)
        assert acceleration == "10.00"
        assert efficiency == "250.00%"

        acceleration, efficiency = calculate_performance_metrics("1.0", 4)
        assert acceleration == "1.00"
        assert efficiency == "25.00%"

        acceleration, efficiency = calculate_performance_metrics("2.0", 4)
        assert acceleration == "0.50"
        assert efficiency == "12.50%"

    def test_calculate_performance_metrics_invalid_values(self):
        acceleration, efficiency = calculate_performance_metrics("0.0", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("-1.0", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("invalid", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("inf", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("nan", 4)
        assert acceleration == "?"
        assert efficiency == "?"

    def test_calculate_performance_metrics_special_strings(self):
        acceleration, efficiency = calculate_performance_metrics("?", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics("N/A", 4)
        assert acceleration == "?"
        assert efficiency == "?"

        acceleration, efficiency = calculate_performance_metrics(None, 4)
        assert acceleration == "?"
        assert efficiency == "?"

    def test_calculate_performance_metrics_different_proc_counts(self):
        perf_val = "0.25"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 1)
        assert acceleration == "4.00"
        assert efficiency == "400.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 2)
        assert acceleration == "4.00"
        assert efficiency == "200.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 8)
        assert acceleration == "4.00"
        assert efficiency == "50.00%"

        acceleration, efficiency = calculate_performance_metrics(perf_val, 16)
        assert acceleration == "4.00"
        assert efficiency == "25.00%"

    def test_calculate_performance_metrics_precision(self):
        acceleration, efficiency = calculate_performance_metrics("0.3", 3)
        assert acceleration == "3.33"
        assert efficiency == "111.11%"

        acceleration, efficiency = calculate_performance_metrics("0.7", 6)
        assert acceleration == "1.43"
        assert efficiency == "23.81%"