diff --git a/scoreboard/main.py b/scoreboard/main.py
index f8c707b8b..24b3f2b4b 100644
--- a/scoreboard/main.py
+++ b/scoreboard/main.py
@@ -1,5 +1,5 @@
from pathlib import Path
-from collections import defaultdict
+from collections import defaultdict, Counter
from datetime import datetime
import csv
import argparse
@@ -198,6 +198,39 @@ def _build_rows_for_task_types(
):
"""Build rows for the given list of task directories and selected task types."""
rows = []
+
+ def _load_student_info_label(dir_name: str):
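+        # Best-effort read of tasks/<dir>/info.json; returns "Last\nFirst\nMiddle" or None on any error.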
+ import json
+
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return None
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ s = data.get("student", {})
+ last = s.get("last_name", "")
+ first = s.get("first_name", "")
+ middle = s.get("middle_name", "")
+ parts = [p for p in [last, first, middle] if p]
+            label = "\n".join(parts)
+ return label if label else None
+ except Exception:
+ return None
+
+ def _load_variant(dir_name: str):
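+        # Variant number shown in the "V" column; "?" when info.json is missing or unreadable.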
+ import json
+
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return "?"
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ return str(data.get("student", {}).get("variant_number", "?"))
+ except Exception:
+ return "?"
+
for dir in sorted(dir_names):
row_types = []
total_count = 0
@@ -239,7 +272,16 @@ def _build_rows_for_task_types(
)
total_count += task_points
- rows.append({"task": dir, "types": row_types, "total": total_count})
+ label_name = _load_student_info_label(dir) or dir
+ variant = _load_variant(dir)
+ rows.append(
+ {
+ "task": label_name,
+ "variant": variant,
+ "types": row_types,
+ "total": total_count,
+ }
+ )
return rows
@@ -294,14 +336,163 @@ def main():
eff_num_proc,
deadlines_cfg,
)
- processes_rows = _build_rows_for_task_types(
- task_types_processes,
- processes_task_dirs,
- perf_stats,
- cfg,
- eff_num_proc,
- deadlines_cfg,
- )
+    # Processes page: render the three numbered tasks as column groups for a single student
+ import json
+
+ def _load_student_info(dir_name: str):
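+        # Parse the per-task info.json; warn and return None so one bad file cannot break the page.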
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return None
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ return data.get("student", {})
+ except Exception as e:
+ logger.warning("Failed to parse %s: %s", info_path, e)
+ return None
+
+ def _identity_key(student: dict) -> str:
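+        # Pipe-joined full name plus group number: one stable key per student across task directories.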
+ return "|".join(
+ [
+ str(student.get("first_name", "")),
+ str(student.get("last_name", "")),
+ str(student.get("middle_name", "")),
+ str(student.get("group_number", "")),
+ ]
+ )
+
+ def _build_cell(dir_name: str, ttype: str):
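+        # Assemble one scoreboard cell (solution points, perf metrics, penalties) for a dir/ttype pair.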
+ status = directories[dir_name].get(ttype)
+ sol_points, solution_style = get_solution_points_and_style(ttype, status, cfg)
+ task_points = sol_points
+ is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty(
+ dir_name, ttype, sol_points, plagiarism_cfg, cfg
+ )
+ task_points += plagiarism_points
+ perf_val = perf_stats.get(dir_name, {}).get(ttype, "?")
+ acceleration, efficiency = calculate_performance_metrics(
+ perf_val, eff_num_proc, ttype
+ )
+ deadline_points = calculate_deadline_penalty(
+ dir_name, ttype, status, deadlines_cfg, tasks_dir
+ )
+ return (
+ {
+ "solution_points": sol_points,
+ "solution_style": solution_style,
+ "perf": perf_val,
+ "acceleration": acceleration,
+ "efficiency": efficiency,
+ "deadline_points": deadline_points,
+ "plagiarised": is_cheated,
+ "plagiarism_points": plagiarism_points,
+ },
+ task_points,
+ )
+
+ proc_infos = []
+ for d in processes_task_dirs:
+ s = _load_student_info(d)
+ if s:
+ proc_infos.append((d, s))
+
+ # Choose target identity: prefer example_processes; otherwise most common
+ target_identity = None
+ if "example_processes" in processes_task_dirs:
+ s0 = _load_student_info("example_processes")
+ if s0:
+ target_identity = _identity_key(s0)
+ if not target_identity and proc_infos:
+ cnt = Counter(_identity_key(s) for _, s in proc_infos)
+ target_identity = cnt.most_common(1)[0][0]
+
+ # Map task_number -> (dir_name, display_label)
+ num_to_dir: dict[int, tuple[str, str]] = {}
+ if target_identity:
+ for d, s in proc_infos:
+ if _identity_key(s) == target_identity:
+ try:
+ tn = int(str(s.get("task_number", "0")))
+ except Exception:
+ continue
+ display = d
+ num_to_dir[tn] = (d, display)
+
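+    # The processes page expects exactly three numbered tasks; gaps get placeholder cells below.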
+ expected_numbers = [1, 2, 3]
+ proc_group_headers = []
+ proc_top_headers = []
+ proc_groups = []
+ total_points_sum = 0
+ for n in expected_numbers:
+ entry = num_to_dir.get(n)
+ if entry:
+ d, display_label = entry
+            # Top header row shows the numbered task label
+ proc_top_headers.append(f"task-{n}")
+ # Second header row shows only mpi/seq
+ proc_group_headers.append({"type": "mpi"})
+ proc_group_headers.append({"type": "seq"})
+ for ttype in ["mpi", "seq"]:
+ cell, pts = _build_cell(d, ttype)
+ proc_groups.append(cell)
+ total_points_sum += pts
+ else:
+ proc_group_headers.append({"type": "mpi", "task_label": f"task_{n}"})
+ proc_group_headers.append({"type": "seq", "task_label": f"task_{n}"})
+ proc_top_headers.append(f"task-{n}")
+ for _ in ["mpi", "seq"]:
+ proc_groups.append(
+ {
+ "solution_points": "?",
+ "solution_style": "",
+ "perf": "?",
+ "acceleration": "?",
+ "efficiency": "?",
+ "deadline_points": "?",
+ "plagiarised": False,
+ "plagiarism_points": "?",
+ }
+ )
+ # Do not affect total; sum only existing tasks
+
+ # Label for processes row: show Last, First, Middle on separate lines; no group number
+ row_label = "processes"
+ row_variant = "?"
+ if target_identity:
+ parts = target_identity.split("|")
+ if len(parts) >= 3:
+ first, last, middle = parts[0], parts[1], parts[2]
+ name_parts = [p for p in [last, first, middle] if p]
+            name = "\n".join(name_parts)
+ row_label = name or row_label
+
+ # Choose variant from the first available task (1..3)
+ def _load_variant(dir_name: str):
+ import json
+
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return "?"
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ return str(data.get("student", {}).get("variant_number", "?"))
+ except Exception:
+ return "?"
+
+ for n in expected_numbers:
+ ent = num_to_dir.get(n)
+ if ent:
+ row_variant = _load_variant(ent[0])
+ break
+ processes_rows = [
+ {
+ "task": row_label,
+ "variant": row_variant,
+ "groups": proc_groups,
+ "total": total_points_sum,
+ }
+ ]
parser = argparse.ArgumentParser(description="Generate HTML scoreboard.")
parser.add_argument(
@@ -317,8 +508,12 @@ def main():
threads_html = table_template.render(
task_types=task_types_threads, rows=threads_rows
)
- processes_html = table_template.render(
- task_types=task_types_processes, rows=processes_rows
+ # Use dedicated template for processes table layout
+ processes_template = env.get_template("processes.html.j2")
+ processes_html = processes_template.render(
+ top_task_names=proc_top_headers,
+ group_headers=proc_group_headers,
+ rows=processes_rows,
)
with open(output_path / "threads.html", "w") as f:
@@ -326,6 +521,218 @@ def main():
with open(output_path / "processes.html", "w") as f:
f.write(processes_html)
+    # --- Build per-group pages and group menus ---
+ def _load_group_number(dir_name: str):
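+        # Group number from info.json; None excludes the directory from per-group pages.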
+ import json
+
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return None
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ return data.get("student", {}).get("group_number")
+ except Exception:
+ return None
+
+ def _slugify(text: str) -> str:
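+        # Keep alphanumerics, "-" and "_"; everything else becomes "_" so group names form safe file names.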
+ return "".join(
+ ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text)
+ )
+
+ # Collect groups
+ threads_groups = sorted(
+ set(filter(None, (_load_group_number(d) for d in threads_task_dirs)))
+ )
+ processes_groups = sorted(
+ set(filter(None, (_load_group_number(d) for d in processes_task_dirs)))
+ )
+
+ # Threads: per-group pages
+ threads_groups_menu = []
+ for g in threads_groups:
+ slug = _slugify(g)
+ out_file = output_path / f"threads_{slug}.html"
+ filtered_dirs = [d for d in threads_task_dirs if _load_group_number(d) == g]
+ rows_g = _build_rows_for_task_types(
+ task_types_threads,
+ filtered_dirs,
+ perf_stats,
+ cfg,
+ eff_num_proc,
+ deadlines_cfg,
+ )
+ html_g = table_template.render(task_types=task_types_threads, rows=rows_g)
+ with open(out_file, "w") as f:
+ f.write(html_g)
+ threads_groups_menu.append({"href": out_file.name, "title": g})
+
+ # Processes: per-group pages
+ processes_groups_menu = []
+ for g in processes_groups:
+ slug = _slugify(g)
+ out_file = output_path / f"processes_{slug}.html"
+ filtered_dirs = [d for d in processes_task_dirs if _load_group_number(d) == g]
+
+ # Reuse earlier logic but limited to filtered_dirs
+ import json as _json
+
+ def _load_student_info_group(dir_name: str):
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return None
+ try:
+ with open(info_path, "r") as f:
+ data = _json.load(f)
+ return data.get("student", {})
+ except Exception:
+ return None
+
+ def _id_key(stud: dict) -> str:
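+            # Same identity key as on the main processes page, scoped to this group's directories.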
+ return "|".join(
+ [
+ str(stud.get("first_name", "")),
+ str(stud.get("last_name", "")),
+ str(stud.get("middle_name", "")),
+ str(stud.get("group_number", "")),
+ ]
+ )
+
+ proc_infos_g = []
+ for d in filtered_dirs:
+ s = _load_student_info_group(d)
+ if s:
+ proc_infos_g.append((d, s))
+
+ target_identity_g = None
+ if "example_processes" in filtered_dirs:
+ s0 = _load_student_info_group("example_processes")
+ if s0 and s0.get("group_number") == g:
+ target_identity_g = _id_key(s0)
+ if not target_identity_g and proc_infos_g:
+ cnt = Counter(_id_key(s) for _, s in proc_infos_g)
+ target_identity_g = cnt.most_common(1)[0][0]
+
+ num_to_dir_g: dict[int, tuple[str, str]] = {}
+ if target_identity_g:
+ for d, s in proc_infos_g:
+ if _id_key(s) == target_identity_g:
+ try:
+ tn = int(str(s.get("task_number", "0")))
+ except Exception:
+ continue
+ num_to_dir_g[tn] = (d, d)
+
+ proc_top_headers_g = []
+ proc_group_headers_g = []
+ proc_groups_g = []
+ total_points_sum_g = 0
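+        # Same three-task column layout as the main processes page, restricted to this group.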
+ for n in [1, 2, 3]:
+ entry = num_to_dir_g.get(n)
+ if entry:
+ d, display_label = entry
+ proc_top_headers_g.append(f"task-{n}")
+ for ttype in ["mpi", "seq"]:
+ proc_group_headers_g.append({"type": ttype})
+                    # Build the cell inline (same fields as _build_cell on the main processes page)
+ status = directories[d].get(ttype)
+ sol_points, solution_style = get_solution_points_and_style(
+ ttype, status, cfg
+ )
+ task_points = sol_points
+ is_cheated, plagiarism_points = (
+ check_plagiarism_and_calculate_penalty(
+ d, ttype, sol_points, plagiarism_cfg, cfg
+ )
+ )
+ task_points += plagiarism_points
+ perf_val = perf_stats.get(d, {}).get(ttype, "?")
+ acceleration, efficiency = calculate_performance_metrics(
+ perf_val, eff_num_proc, ttype
+ )
+ deadline_points = calculate_deadline_penalty(
+ d, ttype, status, deadlines_cfg, tasks_dir
+ )
+ proc_groups_g.append(
+ {
+ "solution_points": sol_points,
+ "solution_style": solution_style,
+ "perf": perf_val,
+ "acceleration": acceleration,
+ "efficiency": efficiency,
+ "deadline_points": deadline_points,
+ "plagiarised": is_cheated,
+ "plagiarism_points": plagiarism_points,
+ }
+ )
+ total_points_sum_g += task_points
+ else:
+ proc_top_headers_g.append(f"task-{n}")
+ for ttype in ["mpi", "seq"]:
+ proc_group_headers_g.append({"type": ttype})
+ proc_groups_g.append(
+ {
+ "solution_points": "?",
+ "solution_style": "",
+ "perf": "?",
+ "acceleration": "?",
+ "efficiency": "?",
+ "deadline_points": "?",
+ "plagiarised": False,
+ "plagiarism_points": "?",
+ }
+ )
+ # Missing task: do not affect total; sum only existing
+
+ # Row label for group page: name without group (three lines max)
+ row_label_g = f"group {g}"
+ if target_identity_g:
+ parts = target_identity_g.split("|")
+ if len(parts) >= 3:
+ first, last, middle = parts[0], parts[1], parts[2]
+ nm_parts = [p for p in [last, first, middle] if p]
+                nm = "\n".join(nm_parts)
+ row_label_g = nm or row_label_g
+
+ # Variant for group row
+ def _load_variant_g(dir_name: str):
+ import json
+
+ info_path = tasks_dir / dir_name / "info.json"
+ if not info_path.exists():
+ return "?"
+ try:
+ with open(info_path, "r") as f:
+ data = json.load(f)
+ return str(data.get("student", {}).get("variant_number", "?"))
+ except Exception:
+ return "?"
+
+ row_variant_g = "?"
+ for n in [1, 2, 3]:
+ entry2 = num_to_dir_g.get(n)
+ if entry2:
+ row_variant_g = _load_variant_g(entry2[0])
+ break
+
+ rows_g = [
+ {
+ "task": row_label_g,
+ "variant": row_variant_g,
+ "groups": proc_groups_g,
+ "total": total_points_sum_g,
+ }
+ ]
+
+ html_g = processes_template.render(
+ top_task_names=proc_top_headers_g,
+ group_headers=proc_group_headers_g,
+ rows=rows_g,
+ )
+ with open(out_file, "w") as f:
+ f.write(html_g)
+ processes_groups_menu.append({"href": out_file.name, "title": g})
+
# Render index menu page
try:
menu_template = env.get_template("menu_index.html.j2")
@@ -345,7 +752,9 @@ def main():
pages=[
{"href": "threads.html", "title": "Threads Scoreboard"},
{"href": "processes.html", "title": "Processes Scoreboard"},
- ]
+ ],
+ groups_threads=threads_groups_menu,
+ groups_processes=processes_groups_menu,
)
with open(output_path / "index.html", "w") as f:
diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2
index ccd919087..534c6b551 100644
--- a/scoreboard/templates/index.html.j2
+++ b/scoreboard/templates/index.html.j2
@@ -8,7 +8,8 @@
     <tr>
-      <th>Tasks</th>
+      <th>Name</th>
+      <th>V</th>
       {% for type in task_types %}
       <th>{{ type }}</th>
       {% endfor %}
     </tr>
     <tr>
@@ -24,6 +25,7 @@
     {% for row in rows %}
     <tr>
       <td>{{ row.task }}</td>
+      <td>{{ row.variant }}</td>
       {% for cell in row.types %}
       <td>{{ cell.solution_points }}</td>
       <td>{{ cell.acceleration }}</td>
diff --git a/scoreboard/templates/menu_index.html.j2 b/scoreboard/templates/menu_index.html.j2
index 88e54d4aa..ef04119b8 100644
--- a/scoreboard/templates/menu_index.html.j2
+++ b/scoreboard/templates/menu_index.html.j2
@@ -7,29 +7,48 @@