diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
index 1afa5d83d..2ca93ef2c 100644
--- a/.github/workflows/pages.yml
+++ b/.github/workflows/pages.yml
@@ -76,6 +76,14 @@ jobs:
- name: Install dependencies
run: |
python3 -m pip install -r requirements.txt
+ - name: Download performance data
+ uses: actions/download-artifact@v4
+ with:
+ name: perf-stat
+ - name: Extract performance data
+ run: |
+ mkdir -p build/perf_stat_dir
+ unzip -o perf-stat.zip -d build/perf_stat_dir
- name: CMake configure
run: |
cmake -S . -B build -DUSE_SCOREBOARD=ON
diff --git a/scoreboard/main.py b/scoreboard/main.py
index 2097677aa..2e4c8bd39 100644
--- a/scoreboard/main.py
+++ b/scoreboard/main.py
@@ -13,7 +13,8 @@
task_types = ["all", "mpi", "omp", "seq", "stl", "tbb"]
-tasks_dir = Path("tasks")
+script_dir = Path(__file__).parent
+tasks_dir = script_dir.parent / "tasks"
directories = defaultdict(dict)
@@ -46,7 +47,7 @@
perf_stat_file_path = (
- Path(__file__).parent.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv"
+ script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv"
)
# Read and parse performance statistics CSV
@@ -93,13 +94,15 @@
perf_val = perf_stats.get(dir, {}).get(task_type, "?")
- # Calculate efficiency if performance data is available
+ # Calculate acceleration and efficiency if performance data is available
+ acceleration = "?"
efficiency = "?"
try:
perf_float = float(perf_val)
if perf_float > 0:
speedup = 1.0 / perf_float
- efficiency = f"{speedup / eff_num_proc * 100:.2f}"
+ acceleration = f"{speedup:.2f}"
+ efficiency = f"{speedup / eff_num_proc * 100:.2f}%"
except (ValueError, TypeError):
pass
@@ -114,7 +117,11 @@
"log",
"-1",
"--format=%ct",
- str(tasks_dir / (dir + ("_disabled" if status == "disabled" else "")) / task_type),
+ str(
+ tasks_dir
+ / (dir + ("_disabled" if status == "disabled" else ""))
+ / task_type
+ ),
]
result = subprocess.run(git_cmd, capture_output=True, text=True)
if result.stdout.strip().isdigit():
@@ -130,6 +137,7 @@
"solution_points": sol_points,
"solution_style": solution_style,
"perf": perf_val,
+ "acceleration": acceleration,
"efficiency": efficiency,
"deadline_points": deadline_points,
"plagiarised": is_cheated,
diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2
index 700d5463e..4e4d9dbb9 100644
--- a/scoreboard/templates/index.html.j2
+++ b/scoreboard/templates/index.html.j2
@@ -36,7 +36,7 @@
{{ row.task }} |
{% for cell in row.types %}
{{ cell.solution_points }} |
- {{ cell.perf }} |
+ {{ cell.acceleration }} |
{{ cell.efficiency }} |
{{ cell.deadline_points }} |
{{ cell.plagiarism_points }} |
diff --git a/scripts/create_perf_table.py b/scripts/create_perf_table.py
index 3d2dc277e..92fd0b6a3 100644
--- a/scripts/create_perf_table.py
+++ b/scripts/create_perf_table.py
@@ -23,29 +23,76 @@
logs_file = open(logs_path, "r")
logs_lines = logs_file.readlines()
for line in logs_lines:
- pattern = r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)"
- result = re.findall(pattern, line)
- if len(result):
- task_name = result[0][1]
- perf_type = result[0][2]
+ # Handle both old format: tasks/task_type/task_name:perf_type:time
+ # and new format: namespace_task_type_enabled:perf_type:time
+ old_pattern = r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)"
+ new_pattern = (
+ r"(\w+_test_task_(threads|processes))_(\w+)_enabled:(\w*):(-*\d*\.\d*)"
+ )
+
+ old_result = re.findall(old_pattern, line)
+ new_result = re.findall(new_pattern, line)
+
+ if len(old_result):
+ task_name = old_result[0][1]
+ perf_type = old_result[0][2]
set_of_task_name.append(task_name)
result_tables[perf_type][task_name] = {}
for ttype in list_of_type_of_tasks:
result_tables[perf_type][task_name][ttype] = -1.0
+ elif len(new_result):
+ # Map every namespaced task onto the canonical "example_<category>" row, keyed only by its category ("threads" or "processes")
+ full_task_name = new_result[0][0]  # full namespace name; currently unused
+ task_category = new_result[0][1] # "threads" or "processes"
+ task_name = f"example_{task_category}"
+ perf_type = new_result[0][3]
+
+ if task_name not in set_of_task_name:
+ set_of_task_name.append(task_name)
+
+ if perf_type not in result_tables:
+ result_tables[perf_type] = {}
+ if task_name not in result_tables[perf_type]:
+ result_tables[perf_type][task_name] = {}
+ for ttype in list_of_type_of_tasks:
+ result_tables[perf_type][task_name][ttype] = -1.0
for line in logs_lines:
- pattern = r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)"
- result = re.findall(pattern, line)
- if len(result):
- task_type = result[0][0]
- task_name = result[0][1]
- perf_type = result[0][2]
- perf_time = float(result[0][3])
+ # Handle both old format: tasks/task_type/task_name:perf_type:time
+ # and new format: namespace_task_type_enabled:perf_type:time
+ old_pattern = r"tasks[\/|\\](\w*)[\/|\\](\w*):(\w*):(-*\d*\.\d*)"
+ new_pattern = (
+ r"(\w+_test_task_(threads|processes))_(\w+)_enabled:(\w*):(-*\d*\.\d*)"
+ )
+
+ old_result = re.findall(old_pattern, line)
+ new_result = re.findall(new_pattern, line)
+
+ if len(old_result):
+ task_type = old_result[0][0]
+ task_name = old_result[0][1]
+ perf_type = old_result[0][2]
+ perf_time = float(old_result[0][3])
if perf_time < 0.1:
msg = f"Performance time = {perf_time} < 0.1 second : for {task_type} - {task_name} - {perf_type} \n"
raise Exception(msg)
result_tables[perf_type][task_name][task_type] = perf_time
+ elif len(new_result):
+ # Extract task details from namespace format
+ full_task_name = new_result[0][0]  # full namespace name; currently unused
+ task_category = new_result[0][1] # "threads" or "processes"
+ task_type = new_result[0][2] # "all", "omp", "seq", etc.
+ perf_type = new_result[0][3]
+ perf_time = float(new_result[0][4])
+ task_name = f"example_{task_category}"
+
+ if perf_time < 0.1:
+ msg = f"Performance time = {perf_time} < 0.1 second : for {task_type} - {task_name} - {perf_type} \n"
+ raise Exception(msg)
+
+ if task_name in result_tables[perf_type]:
+ result_tables[perf_type][task_name][task_type] = perf_time
for table_name in result_tables: