From d51431a1c39d3740b9c91a827187ec09515b836f Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Wed, 2 Jul 2025 18:46:03 +0200 Subject: [PATCH 01/11] refactor CMake scripts: centralize target linking functionality --- CMakeLists.txt | 1 + cmake/gtest.cmake | 12 +++++++ cmake/json.cmake | 11 +++++++ cmake/libenvpp.cmake | 18 ++++++++++ cmake/mpi.cmake | 17 +++++++++- cmake/onetbb.cmake | 14 ++++++++ cmake/openmp.cmake | 10 ++++++ cmake/stb.cmake | 5 +++ modules/core/CMakeLists.txt | 65 +++++-------------------------------- 9 files changed, 95 insertions(+), 58 deletions(-) create mode 100644 cmake/stb.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 88f343287..1a0982a27 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,6 +31,7 @@ include(cmake/modes.cmake) include(cmake/sanitizers.cmake) include(cmake/json.cmake) include(cmake/libenvpp.cmake) +include(cmake/stb.cmake) ################# Parallel programming technologies ################# diff --git a/cmake/gtest.cmake b/cmake/gtest.cmake index a9bdd6f13..eb41a7cd3 100644 --- a/cmake/gtest.cmake +++ b/cmake/gtest.cmake @@ -24,3 +24,15 @@ ExternalProject_Add( "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install") + +function(ppc_link_gtest exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/googletest/googletest/include) + + add_dependencies(${exec_func_lib} ppc_googletest) + target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_googletest/install/lib") + target_link_libraries(${exec_func_lib} PUBLIC gtest gtest_main) +endfunction() diff --git a/cmake/json.cmake b/cmake/json.cmake index 89070d4b7..882553058 100644 --- a/cmake/json.cmake +++ b/cmake/json.cmake @@ -19,3 +19,14 @@ ExternalProject_Add( INSTALL_COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/build" --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_json/install") + +function(ppc_link_json exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/json/include) + + add_dependencies(${exec_func_lib} ppc_json) + target_link_directories(${exec_func_lib} INTERFACE + "${CMAKE_BINARY_DIR}/ppc_json/install/include") +endfunction() \ No newline at end of file diff --git a/cmake/libenvpp.cmake b/cmake/libenvpp.cmake index 564a7d488..e150de19b 100644 --- a/cmake/libenvpp.cmake +++ b/cmake/libenvpp.cmake @@ -38,3 +38,21 @@ if(WIN32) else() set(PPC_ENVPP_LIB_NAME envpp) endif() + +function(ppc_link_envpp exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/include) + target_include_directories( + ${exec_func_lib} SYSTEM + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/external/fmt/include) + + add_dependencies(${exec_func_lib} ppc_libenvpp) + target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_libenvpp/install/lib") + target_link_directories(${exec_func_lib} PUBLIC + "${CMAKE_BINARY_DIR}/ppc_libenvpp/build") + target_link_libraries(${exec_func_lib} PUBLIC ${PPC_ENVPP_LIB_NAME}) + target_link_libraries(${exec_func_lib} PUBLIC ${PPC_FMT_LIB_NAME}) +endfunction() diff --git a/cmake/mpi.cmake b/cmake/mpi.cmake index 8b307ccdd..9394ff932 100644 --- a/cmake/mpi.cmake +++ b/cmake/mpi.cmake @@ -1,4 +1,19 @@ find_package(MPI REQUIRED) if(NOT MPI_FOUND) 
message(FATAL_ERROR "MPI NOT FOUND") -endif(MPI_FOUND) +endif() + +function(ppc_link_mpi exec_func_lib) + find_package(MPI REQUIRED) + if(MPI_COMPILE_FLAGS) + set_target_properties(${exec_func_lib} PROPERTIES COMPILE_FLAGS + "${MPI_COMPILE_FLAGS}") + endif(MPI_COMPILE_FLAGS) + + if(MPI_LINK_FLAGS) + set_target_properties(${exec_func_lib} PROPERTIES LINK_FLAGS + "${MPI_LINK_FLAGS}") + endif(MPI_LINK_FLAGS) + target_include_directories(${exec_func_lib} PUBLIC ${MPI_INCLUDE_PATH}) + target_link_libraries(${exec_func_lib} PUBLIC ${MPI_LIBRARIES}) +endfunction() diff --git a/cmake/onetbb.cmake b/cmake/onetbb.cmake index df89aa354..b14b2ed0e 100644 --- a/cmake/onetbb.cmake +++ b/cmake/onetbb.cmake @@ -42,3 +42,17 @@ if(cmake_build_type_lower STREQUAL "debug") else() set(PPC_TBB_LIB_NAME tbb) endif() + +function(ppc_link_tbb exec_func_lib) + # Add external project include directories + target_include_directories( + ${exec_func_lib} + PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/onetbb/include) + + add_dependencies(${exec_func_lib} ppc_onetbb) + target_link_directories(${exec_func_lib} PUBLIC + ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib) + if(NOT MSVC) + target_link_libraries(${exec_func_lib} PUBLIC ${PPC_TBB_LIB_NAME}) + endif() +endfunction() diff --git a/cmake/openmp.cmake b/cmake/openmp.cmake index 445815153..33b56e339 100644 --- a/cmake/openmp.cmake +++ b/cmake/openmp.cmake @@ -23,3 +23,13 @@ if(OpenMP_FOUND) else(OpenMP_FOUND) message(FATAL_ERROR "OpenMP NOT FOUND") endif(OpenMP_FOUND) + +function(ppc_link_threads exec_func_lib) + target_link_libraries(${exec_func_lib} PUBLIC Threads::Threads) +endfunction() + +function(ppc_link_openmp exec_func_lib) + find_package(OpenMP REQUIRED) + target_link_libraries(${exec_func_lib} PUBLIC ${OpenMP_libomp_LIBRARY} + OpenMP::OpenMP_CXX) +endfunction() diff --git a/cmake/stb.cmake b/cmake/stb.cmake new file mode 100644 index 000000000..2770d4440 --- /dev/null +++ b/cmake/stb.cmake @@ -0,0 +1,5 @@ +function(ppc_link_stb exec_func_lib) + add_library(stb_image STATIC ${CMAKE_SOURCE_DIR}/3rdparty/stb_image_wrapper.cpp) + target_include_directories(stb_image PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/stb) + target_link_libraries(${exec_func_lib} PUBLIC stb_image) +endfunction() \ No newline at end of file diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt index 318572711..487b2c9f5 100644 --- a/modules/core/CMakeLists.txt +++ b/modules/core/CMakeLists.txt @@ -29,63 +29,14 @@ target_include_directories( ${exec_func_lib} PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty ${CMAKE_SOURCE_DIR}/modules ${CMAKE_SOURCE_DIR}/tasks) -# Add external project include directories -target_include_directories( - ${exec_func_lib} - PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/onetbb/include - ${CMAKE_SOURCE_DIR}/3rdparty/json/include - ${CMAKE_SOURCE_DIR}/3rdparty/googletest/googletest/include - ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/include) -target_include_directories( - ${exec_func_lib} SYSTEM - PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/libenvpp/external/fmt/include) - -add_dependencies(${exec_func_lib} ppc_libenvpp) -target_link_directories(${exec_func_lib} PUBLIC - "${CMAKE_BINARY_DIR}/ppc_libenvpp/install/lib") -target_link_directories(${exec_func_lib} PUBLIC - "${CMAKE_BINARY_DIR}/ppc_libenvpp/build") -target_link_libraries(${exec_func_lib} PUBLIC ${PPC_ENVPP_LIB_NAME}) -target_link_libraries(${exec_func_lib} PUBLIC ${PPC_FMT_LIB_NAME}) - -add_dependencies(${exec_func_lib} ppc_json) -target_link_directories(${exec_func_lib} INTERFACE - "${CMAKE_BINARY_DIR}/ppc_json/install/include") - 
-add_dependencies(${exec_func_lib} ppc_googletest) -target_link_directories(${exec_func_lib} PUBLIC - "${CMAKE_BINARY_DIR}/ppc_googletest/install/lib") -target_link_libraries(${exec_func_lib} PUBLIC gtest gtest_main) - -target_link_libraries(${exec_func_lib} PUBLIC Threads::Threads) - -find_package(OpenMP REQUIRED) -target_link_libraries(${exec_func_lib} PUBLIC ${OpenMP_libomp_LIBRARY} - OpenMP::OpenMP_CXX) - -add_dependencies(${exec_func_lib} ppc_onetbb) -target_link_directories(${exec_func_lib} PUBLIC - ${CMAKE_BINARY_DIR}/ppc_onetbb/install/lib) -if(NOT MSVC) - target_link_libraries(${exec_func_lib} PUBLIC ${PPC_TBB_LIB_NAME}) -endif() - -find_package(MPI REQUIRED) -if(MPI_COMPILE_FLAGS) - set_target_properties(${exec_func_lib} PROPERTIES COMPILE_FLAGS - "${MPI_COMPILE_FLAGS}") -endif(MPI_COMPILE_FLAGS) - -if(MPI_LINK_FLAGS) - set_target_properties(${exec_func_lib} PROPERTIES LINK_FLAGS - "${MPI_LINK_FLAGS}") -endif(MPI_LINK_FLAGS) -target_include_directories(${exec_func_lib} PUBLIC ${MPI_INCLUDE_PATH}) -target_link_libraries(${exec_func_lib} PUBLIC ${MPI_LIBRARIES}) - -add_library(stb_image STATIC ${CMAKE_SOURCE_DIR}/3rdparty/stb_image_wrapper.cpp) -target_include_directories(stb_image PUBLIC ${CMAKE_SOURCE_DIR}/3rdparty/stb) -target_link_libraries(${exec_func_lib} PUBLIC stb_image) +ppc_link_envpp(${exec_func_lib}) +ppc_link_json(${exec_func_lib}) +ppc_link_gtest(${exec_func_lib}) +ppc_link_threads(${exec_func_lib}) +ppc_link_openmp(${exec_func_lib}) +ppc_link_tbb(${exec_func_lib}) +ppc_link_mpi(${exec_func_lib}) +ppc_link_stb(${exec_func_lib}) add_executable(${exec_func_tests} ${FUNC_TESTS_SOURCE_FILES}) From e329437dc77129e800bb300fa1a5d9ea496894ea Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Wed, 8 Oct 2025 15:03:30 +0200 Subject: [PATCH 02/11] split HTML scoreboard into threads and processes views; add a menu index page --- .github/workflows/pages.yml | 4 +- scoreboard/main.py | 162 ++++++++++++++++++++---- scoreboard/templates/index.html.j2 | 12 +- scoreboard/templates/menu_index.html.j2 | 35 +++++ 4 files changed, 175 insertions(+), 38 deletions(-) create mode 100644 scoreboard/templates/menu_index.html.j2 diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml index ec0f14fe1..37256a4b9 100644 --- a/.github/workflows/pages.yml +++ b/.github/workflows/pages.yml @@ -83,8 +83,8 @@ jobs: name: perf-stat - name: Extract performance data run: | - mkdir -p build/perf_stat_dir - unzip -o perf-stat.zip -d . + mkdir -p build + unzip -o perf-stat.zip -d build - name: CMake configure run: | cmake -S . 
-B build -DUSE_SCOREBOARD=ON diff --git a/scoreboard/main.py b/scoreboard/main.py index dceaf820f..564515d53 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -13,19 +13,44 @@ logger = logging.getLogger(__name__) task_types = ["all", "mpi", "omp", "seq", "stl", "tbb"] +task_types_threads = ["all", "omp", "seq", "stl", "tbb"] +task_types_processes = ["mpi", "seq"] script_dir = Path(__file__).parent tasks_dir = script_dir.parent / "tasks" +def _read_tasks_type(task_dir: Path) -> str | None: + """Read tasks_type from settings.json in the task directory (if present).""" + settings_path = task_dir / "settings.json" + if settings_path.exists(): + try: + import json + + with open(settings_path, "r") as f: + data = json.load(f) + return data.get("tasks_type") # "threads" or "processes" + except Exception as e: + logger.warning("Failed to parse %s: %s", settings_path, e) + return None + + def discover_tasks(tasks_dir, task_types): - """Discover tasks and their implementation status from the filesystem.""" + """Discover tasks and their implementation status from the filesystem. + + Returns: + directories: dict[task_name][task_type] -> status + tasks_type_map: dict[task_name] -> "threads" | "processes" | None + """ directories = defaultdict(dict) + tasks_type_map: dict[str, str | None] = {} if tasks_dir.exists() and tasks_dir.is_dir(): for task_name_dir in tasks_dir.iterdir(): if task_name_dir.is_dir() and task_name_dir.name not in ["common"]: task_name = task_name_dir.name + # Save tasks_type from settings.json if present + tasks_type_map[task_name] = _read_tasks_type(task_name_dir) for task_type in task_types: task_type_dir = task_name_dir / task_type if task_type_dir.exists() and task_type_dir.is_dir(): @@ -35,10 +60,10 @@ def discover_tasks(tasks_dir, task_types): else: directories[task_name][task_type] = "done" - return directories + return directories, tasks_type_map -directories = discover_tasks(tasks_dir, task_types) +directories, tasks_type_map = discover_tasks(tasks_dir, task_types) def load_performance_data(perf_stat_file_path): @@ -163,24 +188,20 @@ def load_configurations(): return cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg -def main(): - """Main function to generate the scoreboard.""" - cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg = load_configurations() - - env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates")) - - perf_stat_file_path = ( - script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv" - ) - - # Read and parse performance statistics CSV - perf_stats = load_performance_data(perf_stat_file_path) - +def _build_rows_for_task_types( + selected_task_types: list[str], + dir_names: list[str], + perf_stats: dict, + cfg, + eff_num_proc, + deadlines_cfg, +): + """Build rows for the given list of task directories and selected task types.""" rows = [] - for dir in sorted(directories.keys()): + for dir in sorted(dir_names): row_types = [] total_count = 0 - for task_type in task_types: + for task_type in selected_task_types: status = directories[dir].get(task_type) sol_points, solution_style = get_solution_points_and_style( task_type, status, cfg @@ -219,22 +240,113 @@ def main(): total_count += task_points rows.append({"task": dir, "types": row_types, "total": total_count}) + return rows + + +def main(): + """Main function to generate the scoreboard. 
+ + Now generates three pages in the output dir: + - index.html: simple menu linking to threads.html and processes.html + - threads.html: scoreboard for thread-based tasks + - processes.html: scoreboard for process-based tasks + """ + cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg_local = load_configurations() - template = env.get_template("index.html.j2") - html_content = template.render(task_types=task_types, rows=rows) + # Make plagiarism config available to rows builder + global plagiarism_cfg + plagiarism_cfg = plagiarism_cfg_local + + env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates")) + + # Locate perf CSV from CI or local runs + candidates = [ + script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv", + script_dir.parent / "perf_stat_dir" / "task_run_perf_table.csv", + ] + perf_stat_file_path = next((p for p in candidates if p.exists()), candidates[0]) + + # Read and parse performance statistics CSV + perf_stats = load_performance_data(perf_stat_file_path) + + # Partition tasks by tasks_type from settings.json + threads_task_dirs = [ + name for name, ttype in tasks_type_map.items() if ttype == "threads" + ] + processes_task_dirs = [ + name for name, ttype in tasks_type_map.items() if ttype == "processes" + ] + + # Fallback: if settings.json is missing, guess by directory name heuristic + for name in directories.keys(): + if name not in tasks_type_map or tasks_type_map[name] is None: + if "threads" in name: + threads_task_dirs.append(name) + elif "processes" in name: + processes_task_dirs.append(name) + + # Build rows for each page + threads_rows = _build_rows_for_task_types( + task_types_threads, threads_task_dirs, perf_stats, cfg, eff_num_proc, deadlines_cfg + ) + processes_rows = _build_rows_for_task_types( + task_types_processes, + processes_task_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, + ) parser = argparse.ArgumentParser(description="Generate HTML scoreboard.") parser.add_argument( - "-o", "--output", type=str, required=True, help="Output file path" + "-o", "--output", type=str, required=True, help="Output directory path" ) args = parser.parse_args() output_path = Path(args.output) output_path.mkdir(parents=True, exist_ok=True) - output_file = output_path / "index.html" - with open(output_file, "w") as file: - file.write(html_content) + # Render tables + table_template = env.get_template("index.html.j2") + threads_html = table_template.render( + task_types=task_types_threads, rows=threads_rows + ) + processes_html = table_template.render( + task_types=task_types_processes, rows=processes_rows + ) + + with open(output_path / "threads.html", "w") as f: + f.write(threads_html) + with open(output_path / "processes.html", "w") as f: + f.write(processes_html) + + # Render index menu page + try: + menu_template = env.get_template("menu_index.html.j2") + except Exception: + # Simple fallback menu if template missing + menu_html_content = ( + "Scoreboard" + "

<h1>Scoreboard</h1>
<p><a href='threads.html'>Threads Scoreboard</a> | <a href='processes.html'>Processes Scoreboard</a></p>
" + "" + ) + else: + menu_html_content = menu_template.render( + pages=[ + {"href": "threads.html", "title": "Threads Scoreboard"}, + {"href": "processes.html", "title": "Processes Scoreboard"}, + ] + ) + + with open(output_path / "index.html", "w") as f: + f.write(menu_html_content) + + # Copy static assets static_src = script_dir / "static" static_dst = output_path / "static" if static_src.exists(): @@ -245,7 +357,7 @@ def main(): else: logger.warning("Static directory not found at %s", static_src) - logger.info("HTML page generated at %s", output_file) + logger.info("HTML pages generated at %s (index.html, threads.html, processes.html)", output_path) if __name__ == "__main__": diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2 index 4e4d9dbb9..ccd919087 100644 --- a/scoreboard/templates/index.html.j2 +++ b/scoreboard/templates/index.html.j2 @@ -5,17 +5,7 @@ -

-  <h1>Scoreboard</h1>
-
-  <p>
-    Note: This is experimental and results are for reference only!
-  </p>
-
-  <p>
-    (S)olution - The correctness and completeness of the implemented solution.
-    (A)cceleration - The process of speeding up software to improve performance.
-    Speedup = T(seq) / T(parallel)
-    (E)fficiency - Optimizing software speed-up by improving CPU utilization and resource management.
-    Efficiency = Speedup / NumProcs * 100%
-    (D)eadline - The timeliness of the submission in relation to the given deadline.
-    (P)lagiarism - The originality of the work, ensuring no copied content from external sources.
-  </p>
+ diff --git a/scoreboard/templates/menu_index.html.j2 b/scoreboard/templates/menu_index.html.j2 new file mode 100644 index 000000000..88e54d4aa --- /dev/null +++ b/scoreboard/templates/menu_index.html.j2 @@ -0,0 +1,35 @@ + + + + + Scoreboard Menu + + + + + +

+  <h1>Scoreboard</h1>
+
+  <ul>
+    {% for page in pages %}
+    <li><a href="{{ page.href }}">{{ page.title }}</a></li>
+    {% endfor %}
+  </ul>
+
+  <p>
+    (S)olution - The correctness and completeness of the implemented solution.
+    (A)cceleration - The process of speeding up software to improve performance. Speedup = T(seq) / T(parallel)
+    (E)fficiency - Optimizing software speed-up by improving CPU utilization and resource management. Efficiency = Speedup / NumProcs * 100%
+    (D)eadline - The timeliness of the submission in relation to the given deadline.
+    (P)lagiarism - The originality of the work, ensuring no copied content from external sources.
+  </p>
+
+  <p>Choose a scoreboard above to view. Defaults to Threads.</p>
+ + + From 05f2264e2fceeb4303ecade5d3a6817e60556db1 Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Wed, 8 Oct 2025 15:10:06 +0200 Subject: [PATCH 03/11] refactor scoreboard: improve code formatting and HTML consistency --- scoreboard/main.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/scoreboard/main.py b/scoreboard/main.py index 564515d53..f8c707b8b 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -287,7 +287,12 @@ def main(): # Build rows for each page threads_rows = _build_rows_for_task_types( - task_types_threads, threads_task_dirs, perf_stats, cfg, eff_num_proc, deadlines_cfg + task_types_threads, + threads_task_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, ) processes_rows = _build_rows_for_task_types( task_types_processes, @@ -327,12 +332,12 @@ def main(): except Exception: # Simple fallback menu if template missing menu_html_content = ( - "Scoreboard" + 'Scoreboard' "

<h1>Scoreboard</h1>
<p><a href='threads.html'>Threads Scoreboard</a> | <a href='processes.html'>Processes Scoreboard</a></p>
" "" ) else: @@ -357,7 +362,10 @@ def main(): else: logger.warning("Static directory not found at %s", static_src) - logger.info("HTML pages generated at %s (index.html, threads.html, processes.html)", output_path) + logger.info( + "HTML pages generated at %s (index.html, threads.html, processes.html)", + output_path, + ) if __name__ == "__main__": From 6f65278e97a71313e8a66729ff606ecb4d2ae9cb Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Thu, 9 Oct 2025 13:05:18 +0200 Subject: [PATCH 04/11] extend scoreboard: add group-specific views, student variants, and enhanced processes layout --- scoreboard/main.py | 413 +++++++++++++++++++++++- scoreboard/templates/index.html.j2 | 4 +- scoreboard/templates/menu_index.html.j2 | 27 +- tasks/example_processes/info.json | 12 +- tasks/example_threads/info.json | 12 +- 5 files changed, 438 insertions(+), 30 deletions(-) diff --git a/scoreboard/main.py b/scoreboard/main.py index f8c707b8b..1abb28021 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -1,5 +1,5 @@ from pathlib import Path -from collections import defaultdict +from collections import defaultdict, Counter from datetime import datetime import csv import argparse @@ -198,6 +198,37 @@ def _build_rows_for_task_types( ): """Build rows for the given list of task directories and selected task types.""" rows = [] + def _load_student_info_label(dir_name: str): + import json + + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + s = data.get("student", {}) + last = s.get("last_name", "") + first = s.get("first_name", "") + middle = s.get("middle_name", "") + parts = [p for p in [last, first, middle] if p] + label = "
".join(parts) + return label if label else None + except Exception: + return None + + def _load_variant(dir_name: str): + import json + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return "?" + try: + with open(info_path, "r") as f: + data = json.load(f) + return str(data.get("student", {}).get("variant_number", "?")) + except Exception: + return "?" + for dir in sorted(dir_names): row_types = [] total_count = 0 @@ -239,7 +270,14 @@ def _build_rows_for_task_types( ) total_count += task_points - rows.append({"task": dir, "types": row_types, "total": total_count}) + label_name = _load_student_info_label(dir) or dir + variant = _load_variant(dir) + rows.append({ + "task": label_name, + "variant": variant, + "types": row_types, + "total": total_count, + }) return rows @@ -294,14 +332,160 @@ def main(): eff_num_proc, deadlines_cfg, ) - processes_rows = _build_rows_for_task_types( - task_types_processes, - processes_task_dirs, - perf_stats, - cfg, - eff_num_proc, - deadlines_cfg, - ) + # Processes page: build 3 tasks as columns for a single student + import json + + def _load_student_info(dir_name: str): + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + return data.get("student", {}) + except Exception as e: + logger.warning("Failed to parse %s: %s", info_path, e) + return None + + def _identity_key(student: dict) -> str: + return "|".join( + [ + str(student.get("first_name", "")), + str(student.get("last_name", "")), + str(student.get("middle_name", "")), + str(student.get("group_number", "")), + ] + ) + + def _build_cell(dir_name: str, ttype: str): + status = directories[dir_name].get(ttype) + sol_points, solution_style = get_solution_points_and_style(ttype, status, cfg) + task_points = sol_points + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + dir_name, ttype, sol_points, plagiarism_cfg, cfg + ) + task_points += plagiarism_points + perf_val = perf_stats.get(dir_name, {}).get(ttype, "?") + acceleration, efficiency = calculate_performance_metrics( + perf_val, eff_num_proc, ttype + ) + deadline_points = calculate_deadline_penalty( + dir_name, ttype, status, deadlines_cfg, tasks_dir + ) + return ( + { + "solution_points": sol_points, + "solution_style": solution_style, + "perf": perf_val, + "acceleration": acceleration, + "efficiency": efficiency, + "deadline_points": deadline_points, + "plagiarised": is_cheated, + "plagiarism_points": plagiarism_points, + }, + task_points, + ) + + proc_infos = [] + for d in processes_task_dirs: + s = _load_student_info(d) + if s: + proc_infos.append((d, s)) + + # Choose target identity: prefer example_processes; otherwise most common + target_identity = None + if "example_processes" in processes_task_dirs: + s0 = _load_student_info("example_processes") + if s0: + target_identity = _identity_key(s0) + if not target_identity and proc_infos: + cnt = Counter(_identity_key(s) for _, s in proc_infos) + target_identity = cnt.most_common(1)[0][0] + + # Map task_number -> (dir_name, display_label) + num_to_dir: dict[int, tuple[str, str]] = {} + if target_identity: + for d, s in proc_infos: + if _identity_key(s) == target_identity: + try: + tn = int(str(s.get("task_number", "0"))) + except Exception: + continue + display = d + num_to_dir[tn] = (d, display) + + expected_numbers = [1, 2, 3] + proc_group_headers = [] + proc_top_headers = [] + proc_groups = [] + total_points_sum = 0 + for n in expected_numbers: 
+ entry = num_to_dir.get(n) + if entry: + d, display_label = entry + # Top header shows task name (directory) + proc_top_headers.append(f"task-{n}") + # Second header row shows only mpi/seq + proc_group_headers.append({"type": "mpi"}) + proc_group_headers.append({"type": "seq"}) + for ttype in ["mpi", "seq"]: + cell, pts = _build_cell(d, ttype) + proc_groups.append(cell) + total_points_sum += pts + else: + proc_group_headers.append({"type": "mpi", "task_label": f"task_{n}"}) + proc_group_headers.append({"type": "seq", "task_label": f"task_{n}"}) + proc_top_headers.append(f"task-{n}") + for _ in ["mpi", "seq"]: + proc_groups.append( + { + "solution_points": "?", + "solution_style": "", + "perf": "?", + "acceleration": "?", + "efficiency": "?", + "deadline_points": "?", + "plagiarised": False, + "plagiarism_points": "?", + } + ) + # Do not affect total; sum only existing tasks + + # Label for processes row: show Last, First, Middle on separate lines; no group number + row_label = "processes" + row_variant = "?" + if target_identity: + parts = target_identity.split("|") + if len(parts) >= 4: + first, last, middle, _group = parts[0], parts[1], parts[2], parts[3] + name_parts = [p for p in [last, first, middle] if p] + name = "
".join(name_parts) + row_label = name or row_label + # Choose variant from the first available task (1..3) + def _load_variant(dir_name: str): + import json + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return "?" + try: + with open(info_path, "r") as f: + data = json.load(f) + return str(data.get("student", {}).get("variant_number", "?")) + except Exception: + return "?" + for n in expected_numbers: + ent = num_to_dir.get(n) + if ent: + row_variant = _load_variant(ent[0]) + break + processes_rows = [ + { + "task": row_label, + "variant": row_variant, + "groups": proc_groups, + "total": total_points_sum, + } + ] parser = argparse.ArgumentParser(description="Generate HTML scoreboard.") parser.add_argument( @@ -317,8 +501,10 @@ def main(): threads_html = table_template.render( task_types=task_types_threads, rows=threads_rows ) - processes_html = table_template.render( - task_types=task_types_processes, rows=processes_rows + # Use dedicated template for processes table layout + processes_template = env.get_template("processes.html.j2") + processes_html = processes_template.render( + top_task_names=proc_top_headers, group_headers=proc_group_headers, rows=processes_rows ) with open(output_path / "threads.html", "w") as f: @@ -326,6 +512,205 @@ def main(): with open(output_path / "processes.html", "w") as f: f.write(processes_html) + # ——— Build per-group pages and group menus ———————————————————————— + def _load_group_number(dir_name: str): + import json + + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = json.load(f) + return data.get("student", {}).get("group_number") + except Exception: + return None + + def _slugify(text: str) -> str: + return "".join(ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text)) + + # Collect groups + threads_groups = sorted( + set(filter(None, (_load_group_number(d) for d in threads_task_dirs))) + ) + processes_groups = sorted( + set(filter(None, (_load_group_number(d) for d in processes_task_dirs))) + ) + + # Threads: per-group pages + threads_groups_menu = [] + for g in threads_groups: + slug = _slugify(g) + out_file = output_path / f"threads_{slug}.html" + filtered_dirs = [d for d in threads_task_dirs if _load_group_number(d) == g] + rows_g = _build_rows_for_task_types( + task_types_threads, filtered_dirs, perf_stats, cfg, eff_num_proc, deadlines_cfg + ) + html_g = table_template.render(task_types=task_types_threads, rows=rows_g) + with open(out_file, "w") as f: + f.write(html_g) + threads_groups_menu.append({"href": out_file.name, "title": g}) + + # Processes: per-group pages + processes_groups_menu = [] + for g in processes_groups: + slug = _slugify(g) + out_file = output_path / f"processes_{slug}.html" + filtered_dirs = [d for d in processes_task_dirs if _load_group_number(d) == g] + + # Reuse earlier logic but limited to filtered_dirs + import json as _json + def _load_student_info_group(dir_name: str): + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return None + try: + with open(info_path, "r") as f: + data = _json.load(f) + return data.get("student", {}) + except Exception: + return None + + def _id_key(stud: dict) -> str: + return "|".join( + [ + str(stud.get("first_name", "")), + str(stud.get("last_name", "")), + str(stud.get("middle_name", "")), + str(stud.get("group_number", "")), + ] + ) + + proc_infos_g = [] + for d in filtered_dirs: + s = _load_student_info_group(d) + if s: + 
proc_infos_g.append((d, s)) + + target_identity_g = None + if "example_processes" in filtered_dirs: + s0 = _load_student_info_group("example_processes") + if s0 and s0.get("group_number") == g: + target_identity_g = _id_key(s0) + if not target_identity_g and proc_infos_g: + cnt = Counter(_id_key(s) for _, s in proc_infos_g) + target_identity_g = cnt.most_common(1)[0][0] + + num_to_dir_g: dict[int, tuple[str, str]] = {} + if target_identity_g: + for d, s in proc_infos_g: + if _id_key(s) == target_identity_g: + try: + tn = int(str(s.get("task_number", "0"))) + except Exception: + continue + num_to_dir_g[tn] = (d, d) + + proc_top_headers_g = [] + proc_group_headers_g = [] + proc_groups_g = [] + total_points_sum_g = 0 + for n in [1, 2, 3]: + entry = num_to_dir_g.get(n) + if entry: + d, display_label = entry + proc_top_headers_g.append(f"task-{n}") + for ttype in ["mpi", "seq"]: + proc_group_headers_g.append({"type": ttype}) + # build cell + status = directories[d].get(ttype) + sol_points, solution_style = get_solution_points_and_style( + ttype, status, cfg + ) + task_points = sol_points + is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( + d, ttype, sol_points, plagiarism_cfg, cfg + ) + task_points += plagiarism_points + perf_val = perf_stats.get(d, {}).get(ttype, "?") + acceleration, efficiency = calculate_performance_metrics( + perf_val, eff_num_proc, ttype + ) + deadline_points = calculate_deadline_penalty( + d, ttype, status, deadlines_cfg, tasks_dir + ) + proc_groups_g.append( + { + "solution_points": sol_points, + "solution_style": solution_style, + "perf": perf_val, + "acceleration": acceleration, + "efficiency": efficiency, + "deadline_points": deadline_points, + "plagiarised": is_cheated, + "plagiarism_points": plagiarism_points, + } + ) + total_points_sum_g += task_points + else: + proc_top_headers_g.append(f"task-{n}") + for ttype in ["mpi", "seq"]: + proc_group_headers_g.append({"type": ttype}) + proc_groups_g.append( + { + "solution_points": "?", + "solution_style": "", + "perf": "?", + "acceleration": "?", + "efficiency": "?", + "deadline_points": "?", + "plagiarised": False, + "plagiarism_points": "?", + } + ) + # Missing task: do not affect total; sum only existing + + # Row label for group page: name without group (three lines max) + row_label_g = f"group {g}" + if target_identity_g: + parts = target_identity_g.split("|") + if len(parts) >= 4: + first, last, middle, _group = parts[0], parts[1], parts[2], parts[3] + nm_parts = [p for p in [last, first, middle] if p] + nm = "
".join(nm_parts) + row_label_g = nm or row_label_g + # Variant for group row + def _load_variant_g(dir_name: str): + import json + info_path = tasks_dir / dir_name / "info.json" + if not info_path.exists(): + return "?" + try: + with open(info_path, "r") as f: + data = json.load(f) + return str(data.get("student", {}).get("variant_number", "?")) + except Exception: + return "?" + row_variant_g = "?" + for n in [1, 2, 3]: + entry2 = num_to_dir_g.get(n) + if entry2: + row_variant_g = _load_variant_g(entry2[0]) + break + + rows_g = [ + { + "task": row_label_g, + "variant": row_variant_g, + "groups": proc_groups_g, + "total": total_points_sum_g, + } + ] + + html_g = processes_template.render( + top_task_names=proc_top_headers_g, + group_headers=proc_group_headers_g, + rows=rows_g, + ) + with open(out_file, "w") as f: + f.write(html_g) + processes_groups_menu.append({"href": out_file.name, "title": g}) + # Render index menu page try: menu_template = env.get_template("menu_index.html.j2") @@ -345,7 +730,9 @@ def main(): pages=[ {"href": "threads.html", "title": "Threads Scoreboard"}, {"href": "processes.html", "title": "Processes Scoreboard"}, - ] + ], + groups_threads=threads_groups_menu, + groups_processes=processes_groups_menu, ) with open(output_path / "index.html", "w") as f: diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2 index ccd919087..534c6b551 100644 --- a/scoreboard/templates/index.html.j2 +++ b/scoreboard/templates/index.html.j2 @@ -8,7 +8,8 @@
Tasks
- + + {% for type in task_types %} {% endfor %} @@ -24,6 +25,7 @@ {% for row in rows %} + {% for cell in row.types %} diff --git a/scoreboard/templates/menu_index.html.j2 b/scoreboard/templates/menu_index.html.j2 index 88e54d4aa..ef04119b8 100644 --- a/scoreboard/templates/menu_index.html.j2 +++ b/scoreboard/templates/menu_index.html.j2 @@ -7,29 +7,48 @@

Scoreboard

+

+ (V)ariant - Task variant number assigned to the student.
(S)olution - The correctness and completeness of the implemented solution.
(A)cceleration - The process of speeding up software to improve performance. Speedup = T(seq) / T(parallel)
(E)fficiency - Optimizing software speed-up by improving CPU utilization and resource management. Efficiency = Speedup / NumProcs * 100%
(D)eadline - The timeliness of the submission in relation to the given deadline.
(P)lagiarism - The originality of the work, ensuring no copied content from external sources.

-
Choose a scoreboard above to view. Defaults to Threads.
diff --git a/tasks/example_processes/info.json b/tasks/example_processes/info.json index a2c75b29a..513436aac 100644 --- a/tasks/example_processes/info.json +++ b/tasks/example_processes/info.json @@ -1,10 +1,10 @@ { "student": { - "first_name": "", - "last_name": "", - "middle_name": "", - "group_number": "", - "task_number": "", - "variant_number": "" + "first_name": "first_name_p", + "last_name": "last_name_p", + "middle_name": "middle_name_p", + "group_number": "2222222_p", + "task_number": "1", + "variant_number": "23" } } diff --git a/tasks/example_threads/info.json b/tasks/example_threads/info.json index a2c75b29a..cde6f792c 100644 --- a/tasks/example_threads/info.json +++ b/tasks/example_threads/info.json @@ -1,10 +1,10 @@ { "student": { - "first_name": "", - "last_name": "", - "middle_name": "", - "group_number": "", - "task_number": "", - "variant_number": "" + "first_name": "first_name_t", + "last_name": "last_name_t", + "middle_name": "middle_name_t", + "group_number": "2222222_t", + "task_number": "1", + "variant_number": "23" } } From 6d01e07fd2d7ab5b81bb6c8d09bdb3fb7dc9ad48 Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Thu, 9 Oct 2025 13:05:33 +0200 Subject: [PATCH 05/11] add processes view: introduce detailed layout for processes scoreboard --- scoreboard/templates/processes.html.j2 | 45 ++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 scoreboard/templates/processes.html.j2 diff --git a/scoreboard/templates/processes.html.j2 b/scoreboard/templates/processes.html.j2 new file mode 100644 index 000000000..0898dff5f --- /dev/null +++ b/scoreboard/templates/processes.html.j2 @@ -0,0 +1,45 @@ + + + + Processes Scoreboard + + + +
TasksNameV{{ type }}
{{ row.task }}{{ row.variant }}{{ cell.solution_points }} {{ cell.acceleration }}
+ + + + {% for name in top_task_names %} + + {% endfor %} + + + + {% for header in group_headers %} + + {% endfor %} + + + {% for _ in group_headers %} + {% for letter in ('S', 'A', 'E', 'D', 'P') %} + + {% endfor %} + {% endfor %} + + {% for row in rows %} + + + + {% for cell in row.groups %} + + + + + + {% endfor %} + + + {% endfor %} +
NameV{{ name }}Total
{{ header.type }}
{{ letter }}
{{ row.task }}{{ row.variant }}{{ cell.solution_points }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ row.total }}
+ + From 8261acd6028ed59eb714042d514836d2414b76fc Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 9 Oct 2025 13:11:51 +0200 Subject: [PATCH 06/11] Update pages.yml --- .github/workflows/pages.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml index 37256a4b9..ec0f14fe1 100644 --- a/.github/workflows/pages.yml +++ b/.github/workflows/pages.yml @@ -83,8 +83,8 @@ jobs: name: perf-stat - name: Extract performance data run: | - mkdir -p build - unzip -o perf-stat.zip -d build + mkdir -p build/perf_stat_dir + unzip -o perf-stat.zip -d . - name: CMake configure run: | cmake -S . -B build -DUSE_SCOREBOARD=ON From 47c4e0e61fdd7f24a5822209c4bd0bb241c6d55d Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Thu, 9 Oct 2025 13:14:10 +0200 Subject: [PATCH 07/11] comment out dependencies for perf workflow in main.yml --- .github/workflows/main.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index b41003750..d5c526753 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -31,10 +31,10 @@ jobs: - pre-commit uses: ./.github/workflows/windows.yml perf: - needs: - - ubuntu - - mac - - windows +# needs: +# - ubuntu +# - mac +# - windows uses: ./.github/workflows/perf.yml pages: From e3848bc4565d5a92940019c7c33b4e4748d13067 Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Thu, 9 Oct 2025 13:16:29 +0200 Subject: [PATCH 08/11] uncomment dependencies for perf workflow in main.yml --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d5c526753..68afc8704 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -31,8 +31,8 @@ jobs: - pre-commit uses: ./.github/workflows/windows.yml perf: -# needs: -# - ubuntu + needs: + - ubuntu # - mac # - windows uses: ./.github/workflows/perf.yml From c03fa1773aa82d4703d32d7d7f4ece75f87d9b26 Mon Sep 17 00:00:00 2001 From: Alexander Nesterov Date: Thu, 9 Oct 2025 13:24:57 +0200 Subject: [PATCH 09/11] refactor scoreboard: improve code formatting and ensure consistent indentation --- scoreboard/main.py | 52 +++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/scoreboard/main.py b/scoreboard/main.py index 1abb28021..24b3f2b4b 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -198,6 +198,7 @@ def _build_rows_for_task_types( ): """Build rows for the given list of task directories and selected task types.""" rows = [] + def _load_student_info_label(dir_name: str): import json @@ -219,6 +220,7 @@ def _load_student_info_label(dir_name: str): def _load_variant(dir_name: str): import json + info_path = tasks_dir / dir_name / "info.json" if not info_path.exists(): return "?" @@ -272,12 +274,14 @@ def _load_variant(dir_name: str): label_name = _load_student_info_label(dir) or dir variant = _load_variant(dir) - rows.append({ - "task": label_name, - "variant": variant, - "types": row_types, - "total": total_count, - }) + rows.append( + { + "task": label_name, + "variant": variant, + "types": row_types, + "total": total_count, + } + ) return rows @@ -456,14 +460,16 @@ def _build_cell(dir_name: str, ttype: str): row_variant = "?" 
if target_identity: parts = target_identity.split("|") - if len(parts) >= 4: - first, last, middle, _group = parts[0], parts[1], parts[2], parts[3] + if len(parts) >= 3: + first, last, middle = parts[0], parts[1], parts[2] name_parts = [p for p in [last, first, middle] if p] name = "
".join(name_parts) row_label = name or row_label + # Choose variant from the first available task (1..3) def _load_variant(dir_name: str): import json + info_path = tasks_dir / dir_name / "info.json" if not info_path.exists(): return "?" @@ -473,6 +479,7 @@ def _load_variant(dir_name: str): return str(data.get("student", {}).get("variant_number", "?")) except Exception: return "?" + for n in expected_numbers: ent = num_to_dir.get(n) if ent: @@ -504,7 +511,9 @@ def _load_variant(dir_name: str): # Use dedicated template for processes table layout processes_template = env.get_template("processes.html.j2") processes_html = processes_template.render( - top_task_names=proc_top_headers, group_headers=proc_group_headers, rows=processes_rows + top_task_names=proc_top_headers, + group_headers=proc_group_headers, + rows=processes_rows, ) with open(output_path / "threads.html", "w") as f: @@ -527,7 +536,9 @@ def _load_group_number(dir_name: str): return None def _slugify(text: str) -> str: - return "".join(ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text)) + return "".join( + ch if ch.isalnum() or ch in ("-", "_") else "_" for ch in str(text) + ) # Collect groups threads_groups = sorted( @@ -544,7 +555,12 @@ def _slugify(text: str) -> str: out_file = output_path / f"threads_{slug}.html" filtered_dirs = [d for d in threads_task_dirs if _load_group_number(d) == g] rows_g = _build_rows_for_task_types( - task_types_threads, filtered_dirs, perf_stats, cfg, eff_num_proc, deadlines_cfg + task_types_threads, + filtered_dirs, + perf_stats, + cfg, + eff_num_proc, + deadlines_cfg, ) html_g = table_template.render(task_types=task_types_threads, rows=rows_g) with open(out_file, "w") as f: @@ -560,6 +576,7 @@ def _slugify(text: str) -> str: # Reuse earlier logic but limited to filtered_dirs import json as _json + def _load_student_info_group(dir_name: str): info_path = tasks_dir / dir_name / "info.json" if not info_path.exists(): @@ -623,8 +640,10 @@ def _id_key(stud: dict) -> str: ttype, status, cfg ) task_points = sol_points - is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - d, ttype, sol_points, plagiarism_cfg, cfg + is_cheated, plagiarism_points = ( + check_plagiarism_and_calculate_penalty( + d, ttype, sol_points, plagiarism_cfg, cfg + ) ) task_points += plagiarism_points perf_val = perf_stats.get(d, {}).get(ttype, "?") @@ -669,14 +688,16 @@ def _id_key(stud: dict) -> str: row_label_g = f"group {g}" if target_identity_g: parts = target_identity_g.split("|") - if len(parts) >= 4: - first, last, middle, _group = parts[0], parts[1], parts[2], parts[3] + if len(parts) >= 3: + first, last, middle = parts[0], parts[1], parts[2] nm_parts = [p for p in [last, first, middle] if p] nm = "
".join(nm_parts) row_label_g = nm or row_label_g + # Variant for group row def _load_variant_g(dir_name: str): import json + info_path = tasks_dir / dir_name / "info.json" if not info_path.exists(): return "?" @@ -686,6 +707,7 @@ def _load_variant_g(dir_name: str): return str(data.get("student", {}).get("variant_number", "?")) except Exception: return "?" + row_variant_g = "?" for n in [1, 2, 3]: entry2 = num_to_dir_g.get(n) From ab04bcd31bd9f91d58e131b5878d8470bef7c4c7 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 9 Oct 2025 15:29:39 +0200 Subject: [PATCH 10/11] Update main.yml --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 68afc8704..50424351b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -33,8 +33,8 @@ jobs: perf: needs: - ubuntu -# - mac -# - windows + - mac + - windows uses: ./.github/workflows/perf.yml pages: From b7dae9cd80b4f37e23dd56cd5bd917b227861e51 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Thu, 9 Oct 2025 15:29:59 +0200 Subject: [PATCH 11/11] Fix indentation in workflow dependencies --- .github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 50424351b..b41003750 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -33,8 +33,8 @@ jobs: perf: needs: - ubuntu - - mac - - windows + - mac + - windows uses: ./.github/workflows/perf.yml pages: