diff --git a/modules/performance/include/performance.hpp b/modules/performance/include/performance.hpp index 8a378c838..6b74462b0 100644 --- a/modules/performance/include/performance.hpp +++ b/modules/performance/include/performance.hpp @@ -39,12 +39,12 @@ template class Perf { public: // Init performance analysis with an initialized task and initialized data - explicit Perf(const ppc::task::TaskPtr& task_ptr) : task_(task_ptr) { + explicit Perf(const ppc::task::TaskPtr &task_ptr) : task_(task_ptr) { task_ptr->GetStateOfTesting() = ppc::task::StateOfTesting::kPerf; } // Check performance of full task's pipeline: PreProcessing() -> // Validation() -> Run() -> PostProcessing() - void PipelineRun(const PerfAttr& perf_attr) { + void PipelineRun(const PerfAttr &perf_attr) { perf_results_.type_of_running = PerfResults::TypeOfRunning::kPipeline; CommonRun(perf_attr, [&] { @@ -55,7 +55,7 @@ class Perf { }, perf_results_); } // Check performance of task's Run() function - void TaskRun(const PerfAttr& perf_attr) { + void TaskRun(const PerfAttr &perf_attr) { perf_results_.type_of_running = PerfResults::TypeOfRunning::kTaskRun; task_->Validation(); @@ -69,7 +69,7 @@ class Perf { task_->PostProcessing(); } // Print results for automation checkers - void PrintPerfStatistic(const std::string& test_id) const { + void PrintPerfStatistic(const std::string &test_id) const { std::string type_test_name; if (perf_results_.type_of_running == PerfResults::TypeOfRunning::kTaskRun) { type_test_name = "task_run"; @@ -106,7 +106,7 @@ class Perf { private: PerfResults perf_results_; std::shared_ptr> task_; - static void CommonRun(const PerfAttr& perf_attr, const std::function& pipeline, PerfResults& perf_results) { + static void CommonRun(const PerfAttr &perf_attr, const std::function &pipeline, PerfResults &perf_results) { auto begin = perf_attr.current_timer(); for (uint64_t i = 0; i < perf_attr.num_running; i++) { pipeline(); diff --git a/modules/performance/tests/perf_tests.cpp b/modules/performance/tests/perf_tests.cpp index 5394c4769..9a4ce9e36 100644 --- a/modules/performance/tests/perf_tests.cpp +++ b/modules/performance/tests/perf_tests.cpp @@ -25,7 +25,7 @@ namespace ppc::test { template class TestPerfTask : public ppc::task::Task { public: - explicit TestPerfTask(const InType& in) { + explicit TestPerfTask(const InType &in) { this->GetInput() = in; } @@ -53,7 +53,7 @@ class TestPerfTask : public ppc::task::Task { template class FakePerfTask : public TestPerfTask { public: - explicit FakePerfTask(const InType& in) : TestPerfTask(in) {} + explicit FakePerfTask(const InType &in) : TestPerfTask(in) {} bool RunImpl() override { std::this_thread::sleep_for(std::chrono::seconds(11)); @@ -164,7 +164,7 @@ TEST(PerfTests, CheckPerfTaskFloat) { struct ParamTestCase { PerfResults::TypeOfRunning input; std::string expected_output; - friend void PrintTo(const ParamTestCase& param, std::ostream* os) { + friend void PrintTo(const ParamTestCase ¶m, std::ostream *os) { *os << "{ input = " << static_cast(param.input) << ", expected = " << param.expected_output << " }"; } }; @@ -172,7 +172,7 @@ struct ParamTestCase { class GetStringParamNameParamTest : public ::testing::TestWithParam {}; TEST_P(GetStringParamNameParamTest, ReturnsExpectedString) { - const auto& param = GetParam(); + const auto ¶m = GetParam(); EXPECT_EQ(GetStringParamName(param.input), param.expected_output); } @@ -180,7 +180,7 @@ INSTANTIATE_TEST_SUITE_P(ParamTests, GetStringParamNameParamTest, 
::testing::Values(ParamTestCase{PerfResults::TypeOfRunning::kTaskRun, "task_run"}, ParamTestCase{PerfResults::TypeOfRunning::kPipeline, "pipeline"}, ParamTestCase{PerfResults::TypeOfRunning::kNone, "none"}), - [](const ::testing::TestParamInfo& info) { + [](const ::testing::TestParamInfo &info) { return info.param.expected_output; }); @@ -188,7 +188,7 @@ struct TaskTypeTestCase { TypeOfTask type; std::string expected; std::string label; - friend void PrintTo(const TaskTypeTestCase& param, std::ostream* os) { + friend void PrintTo(const TaskTypeTestCase ¶m, std::ostream *os) { *os << "{ type = " << static_cast(param.type) << ", expected = " << param.expected << ", label = " << param.label << " }"; } @@ -217,7 +217,7 @@ class GetStringTaskTypeTest : public ::testing::TestWithParam }; TEST_P(GetStringTaskTypeTest, ReturnsExpectedString) { - const auto& param = GetParam(); + const auto ¶m = GetParam(); EXPECT_EQ(GetStringTaskType(param.type, temp_path), param.expected) << "Failed on: " << param.label; } @@ -236,7 +236,7 @@ TEST(GetStringTaskTypeStandaloneTest, ThrowsIfFileMissing) { TEST(GetStringTaskTypeStandaloneTest, ExceptionMessageContainsPath) { const std::string missing_path = "non_existent_settings.json"; - EXPECT_THROW(try { GetStringTaskType(TypeOfTask::kSEQ, missing_path); } catch (const std::runtime_error& e) { + EXPECT_THROW(try { GetStringTaskType(TypeOfTask::kSEQ, missing_path); } catch (const std::runtime_error &e) { EXPECT_NE(std::string(e.what()).find(missing_path), std::string::npos); throw; }, diff --git a/modules/runners/include/runners.hpp b/modules/runners/include/runners.hpp index 56a46f1e0..b957e5323 100644 --- a/modules/runners/include/runners.hpp +++ b/modules/runners/include/runners.hpp @@ -13,7 +13,7 @@ class UnreadMessagesDetector : public ::testing::EmptyTestEventListener { public: UnreadMessagesDetector() = default; /// @brief Called by GTest after a test ends. Checks for unread messages. - void OnTestEnd(const ::testing::TestInfo& /*test_info*/) override; + void OnTestEnd(const ::testing::TestInfo & /*test_info*/) override; private: }; @@ -26,9 +26,9 @@ class WorkerTestFailurePrinter : public ::testing::EmptyTestEventListener { /// @param base A shared pointer to another GTest event listener. explicit WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener> base) : base_(std::move(base)) {} /// @brief Called after a test ends. Passes call base listener and print failures with rank. - void OnTestEnd(const ::testing::TestInfo& test_info) override; + void OnTestEnd(const ::testing::TestInfo &test_info) override; /// @brief Called when a test part fails. Prints MPI rank info along with the failure. - void OnTestPartResult(const ::testing::TestPartResult& test_part_result) override; + void OnTestPartResult(const ::testing::TestPartResult &test_part_result) override; private: /// @brief Prints the MPI rank of the current process to stderr. @@ -41,12 +41,12 @@ class WorkerTestFailurePrinter : public ::testing::EmptyTestEventListener { /// @param argv Argument vector. /// @return Exit code from RUN_ALL_TESTS or MPI error code if initialization/ /// finalization fails. -int Init(int argc, char** argv); +int Init(int argc, char **argv); /// @brief Initializes the testing environment only for gtest. /// @param argc Argument count. /// @param argv Argument vector. /// @return Exit code from RUN_ALL_TESTS. 
-int SimpleInit(int argc, char** argv); +int SimpleInit(int argc, char **argv); } // namespace ppc::runners diff --git a/modules/runners/src/runners.cpp b/modules/runners/src/runners.cpp index 0925349f5..e13852874 100644 --- a/modules/runners/src/runners.cpp +++ b/modules/runners/src/runners.cpp @@ -15,7 +15,7 @@ namespace ppc::runners { -void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo& /*test_info*/) { +void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo & /*test_info*/) { int rank = -1; MPI_Comm_rank(MPI_COMM_WORLD, &rank); @@ -42,7 +42,7 @@ void UnreadMessagesDetector::OnTestEnd(const ::testing::TestInfo& /*test_info*/) MPI_Barrier(MPI_COMM_WORLD); } -void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo& test_info) { +void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo &test_info) { if (test_info.result()->Passed()) { return; } @@ -50,7 +50,7 @@ void WorkerTestFailurePrinter::OnTestEnd(const ::testing::TestInfo& test_info) { base_->OnTestEnd(test_info); } -void WorkerTestFailurePrinter::OnTestPartResult(const ::testing::TestPartResult& test_part_result) { +void WorkerTestFailurePrinter::OnTestPartResult(const ::testing::TestPartResult &test_part_result) { if (test_part_result.passed() || test_part_result.skipped()) { return; } @@ -75,7 +75,7 @@ int RunAllTests() { } } // namespace -int Init(int argc, char** argv) { +int Init(int argc, char **argv) { const int init_res = MPI_Init(&argc, &argv); if (init_res != MPI_SUCCESS) { std::cerr << std::format("[ ERROR ] MPI_Init failed with code {}", init_res) << '\n'; @@ -88,11 +88,11 @@ int Init(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); - auto& listeners = ::testing::UnitTest::GetInstance()->listeners(); + auto &listeners = ::testing::UnitTest::GetInstance()->listeners(); int rank = -1; MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (rank != 0 && (argc < 2 || argv[1] != std::string("--print-workers"))) { - auto* listener = listeners.Release(listeners.default_result_printer()); + auto *listener = listeners.Release(listeners.default_result_printer()); listeners.Append(new WorkerTestFailurePrinter(std::shared_ptr<::testing::TestEventListener>(listener))); } listeners.Append(new UnreadMessagesDetector()); @@ -108,7 +108,7 @@ int Init(int argc, char** argv) { return status; } -int SimpleInit(int argc, char** argv) { +int SimpleInit(int argc, char **argv) { // Limit the number of threads in TBB tbb::global_control control(tbb::global_control::max_allowed_parallelism, ppc::util::GetNumThreads()); diff --git a/modules/task/tests/task_tests.cpp b/modules/task/tests/task_tests.cpp index 99ffc9175..f7170e745 100644 --- a/modules/task/tests/task_tests.cpp +++ b/modules/task/tests/task_tests.cpp @@ -41,7 +41,7 @@ namespace ppc::test { template class TestTask : public ppc::task::Task { public: - explicit TestTask(const InType& in) { + explicit TestTask(const InType &in) { this->GetInput() = in; } @@ -69,7 +69,7 @@ class TestTask : public ppc::task::Task { template class FakeSlowTask : public TestTask { public: - explicit FakeSlowTask(const InType& in) : TestTask(in) {} + explicit FakeSlowTask(const InType &in) : TestTask(in) {} bool RunImpl() override { std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -230,7 +230,7 @@ TEST(TaskTest, TaskDestructorThrowsIfStageIncomplete) { { std::vector in(20, 1); struct LocalTask : Task, int32_t> { - explicit LocalTask(const std::vector& in) { + explicit LocalTask(const std::vector &in) { this->GetInput() = in; } bool ValidationImpl() 
override { @@ -256,7 +256,7 @@ TEST(TaskTest, TaskDestructorThrowsIfEmpty) { { std::vector in(20, 1); struct LocalTask : Task, int32_t> { - explicit LocalTask(const std::vector& in) { + explicit LocalTask(const std::vector &in) { this->GetInput() = in; } bool ValidationImpl() override { @@ -279,7 +279,7 @@ TEST(TaskTest, TaskDestructorThrowsIfEmpty) { TEST(TaskTest, InternalTimeTestThrowsIfTimeoutExceeded) { struct SlowTask : Task, int32_t> { - explicit SlowTask(const std::vector& in) { + explicit SlowTask(const std::vector &in) { this->GetInput() = in; } bool ValidationImpl() override { @@ -346,6 +346,6 @@ TEST(TaskTest, PostProcessingThrowsIfCalledBeforeRun) { EXPECT_THROW(task->PostProcessing(), std::runtime_error); } -int main(int argc, char** argv) { +int main(int argc, char **argv) { return ppc::runners::SimpleInit(argc, argv); } diff --git a/modules/util/include/func_test_util.hpp b/modules/util/include/func_test_util.hpp index 1aac8d67f..9a4e71327 100644 --- a/modules/util/include/func_test_util.hpp +++ b/modules/util/include/func_test_util.hpp @@ -36,7 +36,7 @@ template /// @tparam TestType Type of the test case or parameter. class BaseRunFuncTests : public ::testing::TestWithParam> { public: - virtual bool CheckTestOutputData(OutType& output_data) = 0; + virtual bool CheckTestOutputData(OutType &output_data) = 0; /// @brief Provides input data for the task. /// @return Initialized input data. virtual InType GetTestInputData() = 0; @@ -48,7 +48,7 @@ class BaseRunFuncTests : public ::testing::TestWithParam - static std::string PrintFuncTestName(const GTestFuncParam& info) { + static std::string PrintFuncTestName(const GTestFuncParam &info) { RequireStaticInterface(); TestType test_param = std::get(ppc::util::GTestParamIndex::kTestParams)>(info.param); return std::get(GTestParamIndex::kNameTest)>(info.param) + "_" + @@ -57,7 +57,7 @@ class BaseRunFuncTests : public ::testing::TestWithParam test_param) { - const std::string& test_name = std::get(GTestParamIndex::kNameTest)>(test_param); + const std::string &test_name = std::get(GTestParamIndex::kNameTest)>(test_param); ValidateTestName(test_name); @@ -73,16 +73,16 @@ class BaseRunFuncTests : public ::testing::TestWithParam& test_param) { + void InitializeAndRunTask(const FuncTestParam &test_param) { task_ = std::get(GTestParamIndex::kTaskGetter)>(test_param)(GetTestInputData()); ExecuteTaskPipeline(); } @@ -110,18 +110,18 @@ class BaseRunFuncTests : public ::testing::TestWithParam -auto ExpandToValuesImpl(const Tuple& t, std::index_sequence /*unused*/) { +auto ExpandToValuesImpl(const Tuple &t, std::index_sequence /*unused*/) { return ::testing::Values(std::get(t)...); } template -auto ExpandToValues(const Tuple& t) { +auto ExpandToValues(const Tuple &t) { constexpr std::size_t kN = std::tuple_size_v; return ExpandToValuesImpl(t, std::make_index_sequence{}); } template -auto GenTaskTuplesImpl(const SizesContainer& sizes, const std::string& settings_path, +auto GenTaskTuplesImpl(const SizesContainer &sizes, const std::string &settings_path, std::index_sequence /*unused*/) { return std::make_tuple(std::make_tuple(ppc::task::TaskGetter, std::string(GetNamespace()) + "_" + @@ -130,13 +130,13 @@ auto GenTaskTuplesImpl(const SizesContainer& sizes, const std::string& settings_ } template -auto TaskListGenerator(const SizesContainer& sizes, const std::string& settings_path) { +auto TaskListGenerator(const SizesContainer &sizes, const std::string &settings_path) { return GenTaskTuplesImpl(sizes, settings_path, std::make_index_sequence>>{}); } 
template -constexpr auto AddFuncTask(const SizesContainer& sizes, const std::string& settings_path) { +constexpr auto AddFuncTask(const SizesContainer &sizes, const std::string &settings_path) { return TaskListGenerator(sizes, settings_path); } diff --git a/modules/util/include/perf_test_util.hpp b/modules/util/include/perf_test_util.hpp index 3c4abec75..827f501d7 100644 --- a/modules/util/include/perf_test_util.hpp +++ b/modules/util/include/perf_test_util.hpp @@ -34,18 +34,18 @@ template class BaseRunPerfTests : public ::testing::TestWithParam> { public: /// @brief Generates a readable name for the performance test case. - static std::string CustomPerfTestName(const ::testing::TestParamInfo>& info) { + static std::string CustomPerfTestName(const ::testing::TestParamInfo> &info) { return ppc::performance::GetStringParamName( std::get(GTestParamIndex::kTestParams)>(info.param)) + "_" + std::get(GTestParamIndex::kNameTest)>(info.param); } protected: - virtual bool CheckTestOutputData(OutType& output_data) = 0; + virtual bool CheckTestOutputData(OutType &output_data) = 0; /// @brief Supplies input data for performance testing. virtual InType GetTestInputData() = 0; - virtual void SetPerfAttributes(ppc::performance::PerfAttr& perf_attrs) { + virtual void SetPerfAttributes(ppc::performance::PerfAttr &perf_attrs) { if (task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kMPI || task_->GetDynamicTypeOfTask() == ppc::task::TypeOfTask::kALL) { const double t0 = GetTimeMPI(); @@ -67,7 +67,7 @@ class BaseRunPerfTests : public ::testing::TestWithParam& perf_test_param) { + void ExecuteTest(const PerfTestParam &perf_test_param) { auto task_getter = std::get(GTestParamIndex::kTaskGetter)>(perf_test_param); auto test_name = std::get(GTestParamIndex::kNameTest)>(perf_test_param); auto mode = std::get(GTestParamIndex::kTestParams)>(perf_test_param); @@ -105,7 +105,7 @@ class BaseRunPerfTests : public ::testing::TestWithParam -auto MakePerfTaskTuples(const std::string& settings_path) { +auto MakePerfTaskTuples(const std::string &settings_path) { const auto name = std::string(GetNamespace()) + "_" + ppc::task::GetStringTaskType(TaskType::GetStaticTypeOfTask(), settings_path); @@ -116,18 +116,18 @@ auto MakePerfTaskTuples(const std::string& settings_path) { } template -auto TupleToGTestValuesImpl(const Tuple& tup, std::index_sequence /*unused*/) { +auto TupleToGTestValuesImpl(const Tuple &tup, std::index_sequence /*unused*/) { return ::testing::Values(std::get(tup)...); } template -auto TupleToGTestValues(Tuple&& tup) { +auto TupleToGTestValues(Tuple &&tup) { constexpr size_t kSize = std::tuple_size_v>; return TupleToGTestValuesImpl(std::forward(tup), std::make_index_sequence{}); } template -auto MakeAllPerfTasks(const std::string& settings_path) { +auto MakeAllPerfTasks(const std::string &settings_path) { return std::tuple_cat(MakePerfTaskTuples(settings_path)...); } diff --git a/modules/util/include/util.hpp b/modules/util/include/util.hpp index a69d7afbe..362c45882 100644 --- a/modules/util/include/util.hpp +++ b/modules/util/include/util.hpp @@ -55,7 +55,7 @@ class DestructorFailureFlag { enum class GTestParamIndex : uint8_t { kTaskGetter, kNameTest, kTestParams }; -std::string GetAbsoluteTaskPath(const std::string& id_path, const std::string& relative_path); +std::string GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path); int GetNumThreads(); int GetNumProc(); double GetTaskMaxTime(); @@ -66,13 +66,13 @@ std::string GetNamespace() { std::string name = typeid(T).name(); 
#ifdef __GNUC__ int status = 0; - std::unique_ptr demangled{abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), - std::free}; + std::unique_ptr demangled{abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), + std::free}; name = (status == 0) ? demangled.get() : name; #endif #ifdef _MSC_VER const std::string prefixes[] = {"class ", "struct ", "enum ", "union "}; - for (const auto& prefix : prefixes) { + for (const auto &prefix : prefixes) { if (name.starts_with(prefix)) { name = name.substr(prefix.size()); break; diff --git a/modules/util/src/util.cpp b/modules/util/src/util.cpp index 97a777281..34c065388 100644 --- a/modules/util/src/util.cpp +++ b/modules/util/src/util.cpp @@ -8,14 +8,14 @@ namespace { -std::string GetAbsolutePath(const std::string& relative_path) { +std::string GetAbsolutePath(const std::string &relative_path) { std::filesystem::path path = std::filesystem::path(PPC_PATH_TO_PROJECT) / "tasks" / relative_path; return path.string(); } } // namespace -std::string ppc::util::GetAbsoluteTaskPath(const std::string& id_path, const std::string& relative_path) { +std::string ppc::util::GetAbsoluteTaskPath(const std::string &id_path, const std::string &relative_path) { std::filesystem::path task_relative = std::filesystem::path(id_path) / "data" / relative_path; return GetAbsolutePath(task_relative.string()); } @@ -60,7 +60,7 @@ constexpr std::array kMpiEnvVars = { "HYDRA_CONTROL_FD", "PMIX_RANK", "SLURM_PROCID", "MSMPI_RANK", "MSMPI_LOCALRANK"}; bool ppc::util::IsUnderMpirun() { - return std::ranges::any_of(kMpiEnvVars, [&](const auto& env_var) { + return std::ranges::any_of(kMpiEnvVars, [&](const auto &env_var) { const auto mpi_env = env::get(env_var); return static_cast(mpi_env.has_value()); }); diff --git a/scoreboard/README.md b/scoreboard/README.md index 69cadd6c1..b43c87571 100644 --- a/scoreboard/README.md +++ b/scoreboard/README.md @@ -16,8 +16,9 @@ Generates `output_directory/index.html` with the scoreboard. ## Configuration -- `data/threads-config.yml` - Task points, deadlines, penalties +- `data/points-info.yml` - Task points, deadlines, penalties - `data/plagiarism.yml` - Flagged submissions +- `data/deadlines.yml` - Optional display deadlines and day offsets ## Testing @@ -31,4 +32,26 @@ python -m pytest tests/ -v ## Output -HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), P (plagiarism), Total. +HTML table with columns: S (solution), A (acceleration), E (efficiency), D (deadline), C (copying), Total. + +### Deadlines display + +- Threads deadlines are auto-distributed across the Spring window: 1 Feb → 15 May. +- Processes deadlines are auto-distributed across the Autumn window: 15 Oct → 14 Dec. +- Due time is 23:59 MSK on the shown date. +- File `data/deadlines.yml` can shift dates per item by integer day offsets (default 0). Example: + +``` +threads: + seq: 0 # no shift + omp: -2 # 2 days earlier + tbb: 3 # 3 days later + stl: 0 + all: 0 +processes: + task_1: 0 + task_2: 5 + task_3: -1 +``` + +- If you put a non-integer string instead of a number, it is used as-is as the label (e.g., `"10 Nov"`). diff --git a/scoreboard/assign_variant.py b/scoreboard/assign_variant.py new file mode 100644 index 000000000..feb021fa3 --- /dev/null +++ b/scoreboard/assign_variant.py @@ -0,0 +1,142 @@ +# file: assign_variant.py +""" +Deterministic variant assignment from Full Name + Group +with the repository name as the ONLY salt. + +Algorithm: + 1) Normalize strings (NFKC, trim, lowercase, map 'ё'->'е', collapse spaces). 
+ 2) Build a key: "surname|name|patronymic|group|repo". + 3) SHA-256(key) -> big integer -> modulo `num_variants`. + +Properties: +- Stable: same inputs → same output. +- Uniform: modulo of a cryptographic hash distributes evenly. +- Note: Without the full group roster, zero collisions cannot be *guaranteed* + (birthday paradox). This is intended for “approximately unique” per-group use. + +Usage: + from assign_variant import assign_variant + v = assign_variant( + surname="Petrov", + name="Pyotr", + patronymic="Petrovich", + group="MEN-201", + repo="learning-process/parallel_programming_course", + num_variants=31, # produces values in 0..30 + ) + print(v) +""" + +from __future__ import annotations + +import hashlib +import re +import unicodedata +from typing import Optional + +__all__ = ["assign_variant", "normalize"] + + +def normalize(s: Optional[str]) -> str: + """ + Normalize a string: + - Unicode NFKC, + - trim, + - lowercase, + - map Cyrillic 'ё' -> 'е' (common normalization in Russian names), + - collapse multiple spaces to a single space. + + None -> '' (empty string). + """ + if not s: + return "" + s = unicodedata.normalize("NFKC", s).strip().lower() + s = s.replace("ё", "е") + s = re.sub(r"\s+", " ", s) + return s + + +def _hash_int(key: str) -> int: + """Return SHA-256(key) as a big integer.""" + return int.from_bytes(hashlib.sha256(key.encode("utf-8")).digest(), "big") + + +def assign_variant( + surname: str, + name: str, + group: str, + repo: str, + patronymic: Optional[str] = "", + num_variants: int = 31, +) -> int: + """ + Deterministically returns a variant index in [0 .. num_variants-1] + based on (surname, name, patronymic, group) and the repository name (repo) + as the sole salt. + + :param surname: Last name + :param name: First name + :param group: Group identifier (e.g., "MEN-201") + :param repo: Repository name used as salt (e.g., "org/repo" or just "repo") + :param patronymic: Middle name / patronymic (optional) + :param num_variants: Total number of variants (> 0). Output range: 0..num_variants-1 + :return: int — the variant index + """ + if not isinstance(num_variants, int) or num_variants < 1: + raise ValueError("num_variants must be a positive integer (> 0)") + if not repo or not isinstance(repo, str): + raise ValueError("repo must be a non-empty string") + + key = "|".join( + ( + normalize(surname), + normalize(name), + normalize(patronymic), + normalize(group), + normalize(repo), + ) + ) + h = _hash_int(key) + return h % num_variants + + +# Minimal self-check when executed directly (no CLI arguments). 
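+# Worked illustration (values are examples, not from the repository):
+#   normalize("  ПЁТР ") -> "петр"  (NFKC, trim, lowercase, ё->е, collapse spaces)
+#   key = "петров|петр|петрович|мен-201|learning-process/parallel_programming_course"
+#   variant = SHA-256(key) mod num_variants  -> identical for any casing/spacing variant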
+if __name__ == "__main__": + + def demo(): + print("Demo: deterministic assignment\n") + + v1 = assign_variant( + surname="Петров", + name="Пётр", + patronymic="Петрович", + group="МЕН-201", + repo="learning-process/parallel_programming_course", + num_variants=31, + ) + # Different casing/spacing/ё→е should not change the result: + v2 = assign_variant( + surname="ПЕТРОВ", + name="петр ", + patronymic="пЕТРОВИЧ", + group=" мен-201 ", + repo="learning-process/parallel_programming_course", + num_variants=31, + ) + assert v1 == v2, "Normalization should make results identical" + + v_other_repo = assign_variant( + surname="Petrov", + name="Pyotr", + patronymic="Petrovich", + group="MEN-201", + repo="learning-process/ppc_2025_fall", # different salt → likely different value + num_variants=31, + ) + + print(f"Variant (repo=A): {v1}") + print(f"Variant (same inputs, normalized): {v2}") + print(f"Variant (repo=B): {v_other_repo}") + print("\nOK: deterministic & normalized.") + + demo() diff --git a/scoreboard/data/copying.yml b/scoreboard/data/copying.yml new file mode 100644 index 000000000..f38f6fbad --- /dev/null +++ b/scoreboard/data/copying.yml @@ -0,0 +1,14 @@ +threads: + copying: + seq: + - example_threads + omp: [] + tbb: [] + stl: [] + all: [] +processes: + copying: + mpi: + - example_processes + seq: + - example_processes diff --git a/scoreboard/data/deadlines.yml b/scoreboard/data/deadlines.yml new file mode 100644 index 000000000..152422d2e --- /dev/null +++ b/scoreboard/data/deadlines.yml @@ -0,0 +1,13 @@ +threads: + # Put integer to shift auto date by N days (negative allowed). Default 0. + seq: 0 + omp: 0 + tbb: 0 + stl: 0 + all: 0 + +processes: + # Use integer offsets for tasks; default 0. + task_1: 0 + task_2: 0 + task_3: 0 diff --git a/scoreboard/data/performance.yml b/scoreboard/data/performance.yml deleted file mode 100644 index e4d741028..000000000 --- a/scoreboard/data/performance.yml +++ /dev/null @@ -1 +0,0 @@ -performance: diff --git a/scoreboard/data/plagiarism.yml b/scoreboard/data/plagiarism.yml deleted file mode 100644 index 3a4d581a4..000000000 --- a/scoreboard/data/plagiarism.yml +++ /dev/null @@ -1,8 +0,0 @@ -plagiarism: - mpi: [] - omp: [] - seq: - - broken_example - stl: [] - tbb: [] - all: [] diff --git a/scoreboard/data/points-info.yml b/scoreboard/data/points-info.yml new file mode 100644 index 000000000..82c5b774f --- /dev/null +++ b/scoreboard/data/points-info.yml @@ -0,0 +1,63 @@ +processes: + semester_total: 70 + tasks: + - name: mpi_task_1 + mpi: + - S: 8 + - A: 0 + seq: + - S: 2 + R: 2 + variants_max: 27 + Total: 12 + - name: mpi_task_2 + mpi: + - S: 12 + - A: 5 + seq: + - S: 3 + R: 3 + variants_max: 23 + Total: 23 + - name: mpi_task_3 + mpi: + - S: 16 + - A: 10 + seq: + - S: 4 + R: 5 + variants_max: 32 + Total: 35 +threads: + semester_total: 64 + variants_max: 30 + tasks: + - name: seq + S: 4 + R: 1 + Total: 5 + - name: omp + S: 6 + A: 3 + R: 2 + Total: 11 + - name: tbb + S: 6 + A: 3 + R: 2 + Total: 11 + - name: stl + S: 8 + A: 6 + R: 2 + Total: 16 + - name: all + S: 10 + A: 8 + R: 3 + Total: 21 +efficiency: + num_proc: 4 +copying: + coefficient: 0.5 + note: "Penalty C = -coefficient * S (scoreboard notation)" diff --git a/scoreboard/data/threads-config.yml b/scoreboard/data/threads-config.yml deleted file mode 100644 index ed8b5683c..000000000 --- a/scoreboard/data/threads-config.yml +++ /dev/null @@ -1,49 +0,0 @@ -scoreboard: - task: - mpi: - solution: - max: 0 - performance: - max: 0 - visible: true - omp: - solution: - max: 6 - performance: - max: 3 
- visible: true - seq: - solution: - max: 4 - performance: - max: 0 - visible: true - stl: - solution: - max: 8 - performance: - max: 6 - visible: true - tbb: - solution: - max: 6 - performance: - max: 3 - visible: true - all: - solution: - max: 10 - performance: - max: 8 - visible: true - plagiarism: - coefficient: 0.5 - efficiency: - num_proc: 4 - deadlines: - mpi: "2025-12-31" - omp: "2025-12-31" - seq: "2025-12-31" - stl: "2025-12-31" - tbb: "2025-12-31" - all: "2025-12-31" diff --git a/scoreboard/main.py b/scoreboard/main.py index 24b3f2b4b..25c1206d0 100644 --- a/scoreboard/main.py +++ b/scoreboard/main.py @@ -8,16 +8,58 @@ import shutil from jinja2 import Environment, FileSystemLoader import logging +import sys + +# Try ZoneInfo from stdlib, then from backports, else fall back to naive time +try: + from zoneinfo import ZoneInfo # type: ignore +except Exception: # pragma: no cover - fallback for Python < 3.9 + try: + from backports.zoneinfo import ZoneInfo # type: ignore + except Exception: # Last resort: define a stub + ZoneInfo = None # type: ignore logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") logger = logging.getLogger(__name__) task_types = ["all", "mpi", "omp", "seq", "stl", "tbb"] -task_types_threads = ["all", "omp", "seq", "stl", "tbb"] +# Threads table order: seq first, then omp, tbb, stl, all +task_types_threads = ["seq", "omp", "tbb", "stl", "all"] task_types_processes = ["mpi", "seq"] script_dir = Path(__file__).parent tasks_dir = script_dir.parent / "tasks" +# Salt is derived from the repository root directory name (dynamic) +REPO_ROOT = script_dir.parent.resolve() +# Salt format: "learning_process/" +REPO_SALT = f"learning_process/{REPO_ROOT.name}" + +# Ensure we can import assign_variant from scoreboard directory +if str(script_dir) not in sys.path: + sys.path.insert(0, str(script_dir)) +try: + from assign_variant import assign_variant +except Exception: + + def assign_variant( + surname: str, + name: str, + group: str, + repo: str, + patronymic: str = "", + num_variants: int = 1, + ) -> int: + return 0 + + +def _now_msk(): + """Return current datetime in MSK if tz support is available, else local time.""" + try: + if ZoneInfo is not None: + return datetime.now(ZoneInfo("Europe/Moscow")) + except Exception: + pass + return datetime.now() def _read_tasks_type(task_dir: Path) -> str | None: @@ -112,9 +154,152 @@ def calculate_performance_metrics(perf_val, eff_num_proc, task_type): return acceleration, efficiency +def _find_max_solution(points_info, task_type: str) -> int: + """Resolve max S for a given task type from points-info (threads list).""" + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("S", 0)) + except Exception: + return 0 + if task_type == "mpi": + return 0 + return 0 + + +def _find_report_max(points_info, task_type: str) -> int: + """Resolve max Report (R) points for a given task type from points-info (threads). + Returns 0 if not found. 
+ """ + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("R", 0)) + except Exception: + return 0 + return 0 + + +def _find_performance_max(points_info, task_type: str) -> int: + """Resolve max Performance (A) points for a given task type (threads).""" + threads_tasks = (points_info.get("threads", {}) or {}).get("tasks", []) + for t in threads_tasks: + if str(t.get("name")) == task_type: + try: + return int(t.get("A", 0)) + except Exception: + return 0 + return 0 + + +def _calc_perf_points_from_efficiency(efficiency_str: str, max_points: int) -> float: + """Calculate Performance points as a real number (x.yy). + + Mapping (eff -> percent of max): + >=50 -> 100; [45,50) -> 90; [42,45) -> 80; [40,42) -> 70; [37,40) -> 60; + [35,37) -> 50; [32,35) -> 40; [30,32) -> 30; [27,30) -> 20; [25,27) -> 10; <25 -> 0 + Returns a float rounded to 2 decimals (no ceil). + """ + if not isinstance(efficiency_str, str) or not efficiency_str.endswith("%"): + return 0.0 + try: + val = float(efficiency_str.rstrip("%")) + except Exception: + return 0.0 + perc = 0.0 + if val >= 50: + perc = 1.0 + elif 45 <= val < 50: + perc = 0.9 + elif 42 <= val < 45: + perc = 0.8 + elif 40 <= val < 42: + perc = 0.7 + elif 37 <= val < 40: + perc = 0.6 + elif 35 <= val < 37: + perc = 0.5 + elif 32 <= val < 35: + perc = 0.4 + elif 30 <= val < 32: + perc = 0.3 + elif 27 <= val < 30: + perc = 0.2 + elif 25 <= val < 27: + perc = 0.1 + else: + perc = 0.0 + pts = max_points * perc if max_points > 0 else 0.0 + # round to 2 decimals (banker's rounding acceptable here) + return round(pts, 2) + + +def _find_process_report_max(points_info, task_number: int) -> int: + """Get max report (R) points for process task by ordinal (1..3). + Looks up processes.tasks with names like 'mpi_task_1'. + """ + proc = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc: + if str(t.get("name")) == key: + try: + return int(t.get("R", 0)) + except Exception: + return 0 + return 0 + + +def _find_process_points(points_info, task_number: int) -> tuple[int, int, int, int]: + """Return (S_mpi, S_seq, A_mpi, R) maxima for a given process task ordinal (1..3). + Supports both mapping and list-of-maps (per user's YAML example). 
+ """ + proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc_tasks: + if str(t.get("name")) == key: + + def _extract(obj, k): + if isinstance(obj, dict): + return int(obj.get(k, 0)) + if isinstance(obj, list): + for it in obj: + if isinstance(it, dict) and k in it: + try: + return int(it.get(k, 0)) + except Exception: + return 0 + return 0 + + mpi_blk = t.get("mpi", {}) + seq_blk = t.get("seq", {}) + s_mpi = _extract(mpi_blk, "S") + a_mpi = _extract(mpi_blk, "A") + s_seq = _extract(seq_blk, "S") + try: + r = int(t.get("R", 0)) + except Exception: + r = 0 + return s_mpi, s_seq, a_mpi, r + return 0, 0, 0, 0 + + +def _find_process_variants_max(points_info, task_number: int) -> int: + proc_tasks = (points_info.get("processes", {}) or {}).get("tasks", []) + key = f"mpi_task_{task_number}" + for t in proc_tasks: + if str(t.get("name")) == key: + try: + return int(t.get("variants_max", 1)) + except Exception: + return 1 + return 1 + + def get_solution_points_and_style(task_type, status, cfg): """Get solution points and CSS style based on task type and status.""" - max_sol_points = int(cfg["scoreboard"]["task"][task_type]["solution"]["max"]) + max_sol_points = _find_max_solution(cfg, task_type) sol_points = max_sol_points if status in ("done", "disabled") else 0 solution_style = "" if status == "done": @@ -125,17 +310,50 @@ def get_solution_points_and_style(task_type, status, cfg): def check_plagiarism_and_calculate_penalty( - dir, task_type, sol_points, plagiarism_cfg, cfg + dir, task_type, sol_points, plagiarism_cfg, cfg, semester: str | None ): - """Check if task is plagiarized and calculate penalty points.""" + """Check if task is plagiarized and calculate penalty points. + + Supports two config layouts: + - legacy: { plagiarism: { seq: [...], omp: [...], ... 
} } + - semesters: { threads: {plagiarism: {...}}, processes: {plagiarism: {...}} } + """ clean_dir = dir[: -len("_disabled")] if dir.endswith("_disabled") else dir - is_cheated = ( - dir in plagiarism_cfg["plagiarism"][task_type] - or clean_dir in plagiarism_cfg["plagiarism"][task_type] - ) + + # Resolve copying/plagiarism mapping based on layout + plag_map = {} + if isinstance(plagiarism_cfg, dict) and ( + "copying" in plagiarism_cfg or "plagiarism" in plagiarism_cfg + ): + plag_map = ( + plagiarism_cfg.get("copying") + if "copying" in plagiarism_cfg + else plagiarism_cfg.get("plagiarism", {}) + ) or {} + elif ( + isinstance(plagiarism_cfg, dict) + and semester + and semester in plagiarism_cfg + and isinstance(plagiarism_cfg[semester], dict) + ): + inner = plagiarism_cfg[semester] + plag_map = ( + inner.get("copying") if "copying" in inner else inner.get("plagiarism", {}) + ) or {} + + flagged_list = set(plag_map.get(task_type, []) or []) + is_cheated = dir in flagged_list or clean_dir in flagged_list plagiarism_points = 0 if is_cheated: - plag_coeff = float(cfg["scoreboard"]["plagiarism"]["coefficient"]) + # Prefer new key 'copying', fallback to legacy 'plagiarism' + try: + plag_coeff = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff = 0.0 plagiarism_points = -plag_coeff * sol_points return is_cheated, plagiarism_points @@ -170,22 +388,22 @@ def calculate_deadline_penalty(dir, task_type, status, deadlines_cfg, tasks_dir) def load_configurations(): - """Load all configuration files and return parsed data.""" - config_path = Path(__file__).parent / "data" / "threads-config.yml" - assert config_path.exists(), f"Config file not found: {config_path}" - with open(config_path, "r") as file: - cfg = yaml.safe_load(file) - assert cfg, "Configuration is empty" + """Load points-info (max points, deadlines, efficiency) and plagiarism lists.""" + points_info_path = Path(__file__).parent / "data" / "points-info.yml" + assert points_info_path.exists(), f"Points info file not found: {points_info_path}" + with open(points_info_path, "r") as f: + points_info = yaml.safe_load(f) + assert points_info, "Points info is empty" - eff_num_proc = int(cfg["scoreboard"].get("efficiency", {}).get("num_proc", 1)) - deadlines_cfg = cfg["scoreboard"].get("deadlines", {}) + eff_num_proc = int(points_info.get("efficiency", {}).get("num_proc", 1)) + deadlines_cfg = points_info.get("deadlines", {}) - plagiarism_config_path = Path(__file__).parent / "data" / "plagiarism.yml" + plagiarism_config_path = Path(__file__).parent / "data" / "copying.yml" with open(plagiarism_config_path, "r") as file: plagiarism_cfg = yaml.safe_load(file) assert plagiarism_cfg, "Plagiarism configuration is empty" - return cfg, eff_num_proc, deadlines_cfg, plagiarism_cfg + return points_info, eff_num_proc, deadlines_cfg, plagiarism_cfg def _build_rows_for_task_types( @@ -218,18 +436,24 @@ def _load_student_info_label(dir_name: str): except Exception: return None - def _load_variant(dir_name: str): + def _load_student_fields(dir_name: str): import json info_path = tasks_dir / dir_name / "info.json" if not info_path.exists(): - return "?" 
+ return None try: with open(info_path, "r") as f: data = json.load(f) - return str(data.get("student", {}).get("variant_number", "?")) + s = data.get("student", {}) + return ( + str(s.get("last_name", "")), + str(s.get("first_name", "")), + str(s.get("middle_name", "")), + str(s.get("group_number", "")), + ) except Exception: - return "?" + return None for dir in sorted(dir_names): row_types = [] @@ -242,7 +466,7 @@ def _load_variant(dir_name: str): task_points = sol_points is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - dir, task_type, sol_points, plagiarism_cfg, cfg + dir, task_type, sol_points, plagiarism_cfg, cfg, semester="threads" ) task_points += plagiarism_points @@ -258,6 +482,23 @@ def _load_variant(dir_name: str): dir, task_type, status, deadlines_cfg, tasks_dir ) + # Report presence: award R only if report.md exists inside the task directory + report_present = (tasks_dir / dir / "report.md").exists() + report_points = _find_report_max(cfg, task_type) if report_present else 0 + + # Performance points P for non-seq types, based on efficiency + perf_max = _find_performance_max(cfg, task_type) + if task_type != "seq": + perf_points = _calc_perf_points_from_efficiency(efficiency, perf_max) + perf_points_display = ( + f"{perf_points:.2f}" + if isinstance(efficiency, str) and efficiency.endswith("%") + else "—" + ) + else: + perf_points = 0.0 + perf_points_display = "—" + row_types.append( { "solution_points": sol_points, @@ -265,15 +506,37 @@ def _load_variant(dir_name: str): "perf": perf_val, "acceleration": acceleration, "efficiency": efficiency, + "perf_points": perf_points, + "perf_points_display": perf_points_display, "deadline_points": deadline_points, "plagiarised": is_cheated, "plagiarism_points": plagiarism_points, + "report": report_points, } ) - total_count += task_points + # Total: include Solution + Performance + Report + Copying penalty (exclude Deadline) + total_count += task_points + perf_points + report_points label_name = _load_student_info_label(dir) or dir - variant = _load_variant(dir) + # Generate variant for threads based on student info and variants_max + threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) + fields = _load_student_fields(dir) + if fields: + last, first, middle, group = fields + try: + v_idx = assign_variant( + last, + first, + group, + REPO_SALT, + patronymic=middle, + num_variants=threads_vmax, + ) + variant = str(v_idx + 1) + except Exception: + variant = "?" + else: + variant = "?" rows.append( { "task": label_name, @@ -301,6 +564,94 @@ def main(): env = Environment(loader=FileSystemLoader(Path(__file__).parent / "templates")) + # Load optional display deadlines from deadlines.yml and/or auto-compute evenly + deadlines_display_threads: dict[str, str] | None = None + deadlines_display_processes: dict[str, str] | None = None + try: + dl_file = script_dir / "data" / "deadlines.yml" + if dl_file.exists(): + with open(dl_file, "r") as f: + dl_cfg = yaml.safe_load(f) or {} + deadlines_display_threads = dl_cfg.get("threads") or {} + deadlines_display_processes = dl_cfg.get("processes") or {} + except Exception: + pass + + # Helper: compute evenly spaced dates for current semester (MSK) + from datetime import date, timedelta + import calendar + + def _abbr(day: date) -> str: + return f"{day.day} {calendar.month_abbr[day.month]}" + + def _spring_bounds(today: date) -> tuple[date, date]: + """Return [1 Feb .. 15 May] window for the appropriate year. 
+ If today is past 15 May, use next year's spring; otherwise this year's. + """ + y = today.year + start = date(y, 2, 1) + end = date(y, 5, 15) + if today > end: + y += 1 + start = date(y, 2, 1) + end = date(y, 5, 15) + return start, end + + def _autumn_bounds(today: date) -> tuple[date, date]: + """Return [15 Oct .. 14 Dec] window for the appropriate year. + If today is past 14 Dec, use next year's autumn; otherwise this year's. + """ + y = today.year + start = date(y, 10, 15) + end = date(y, 12, 14) + if today > end: + y += 1 + start = date(y, 10, 15) + end = date(y, 12, 14) + return start, end + + def _evenly_spaced_dates(n: int, start: date, end: date) -> list[date]: + """ + Return n deadlines evenly spaced across the window (start..end], + i.e., strictly after the start date, with the last at end. + Positions are at fractions (i+1)/n of the total span. + """ + if n <= 1: + return [end] + total = (end - start).days + if total < 0: + start, end = end, start + total = -total + res = [] + for i in range(n): + off = int(round((i + 1) * total / n)) + if off <= 0: + off = 1 + if off > total: + off = total + res.append(start + timedelta(days=off)) + return res + + def _compute_display_deadlines_threads(order: list[str]) -> dict[str, date]: + # Threads = Spring semester (prefer MSK; fallback to local time) + try: + today = _now_msk().date() + except Exception: + today = datetime.now().date() + s, e = _spring_bounds(today) + ds = _evenly_spaced_dates(len(order), s, e) + return {t: d for t, d in zip(order, ds)} + + def _compute_display_deadlines_processes(n_items: int) -> list[date]: + # Processes = Autumn semester (prefer MSK; fallback to local time) + try: + today = _now_msk().date() + except Exception: + today = datetime.now().date() + s, e = _autumn_bounds(today) + ds = _evenly_spaced_dates(n_items, s, e) + return ds + # Locate perf CSV from CI or local runs candidates = [ script_dir.parent / "build" / "perf_stat_dir" / "task_run_perf_table.csv", @@ -366,7 +717,7 @@ def _build_cell(dir_name: str, ttype: str): sol_points, solution_style = get_solution_points_and_style(ttype, status, cfg) task_points = sol_points is_cheated, plagiarism_points = check_plagiarism_and_calculate_penalty( - dir_name, ttype, sol_points, plagiarism_cfg, cfg + dir_name, ttype, sol_points, plagiarism_cfg, cfg, semester="processes" ) task_points += plagiarism_points perf_val = perf_stats.get(dir_name, {}).get(ttype, "?") @@ -422,6 +773,7 @@ def _build_cell(dir_name: str, ttype: str): proc_group_headers = [] proc_top_headers = [] proc_groups = [] + proc_r_values = [] total_points_sum = 0 for n in expected_numbers: entry = num_to_dir.get(n) @@ -432,10 +784,60 @@ def _build_cell(dir_name: str, ttype: str): # Second header row shows only mpi/seq proc_group_headers.append({"type": "mpi"}) proc_group_headers.append({"type": "seq"}) + group_cells = [] for ttype in ["mpi", "seq"]: - cell, pts = _build_cell(d, ttype) - proc_groups.append(cell) - total_points_sum += pts + cell, _ = _build_cell(d, ttype) + group_cells.append(cell) + # Override displayed points for processes: S under MPI/SEQ from points-info; A points under MPI only + s_mpi, s_seq, a_mpi, r_max = _find_process_points(cfg, n) + has_mpi = bool(directories[d].get("mpi")) + has_seq = bool(directories[d].get("seq")) + report_present = (tasks_dir / d / "report.md").exists() + group_cells[0]["solution_points"] = s_mpi if has_mpi else 0 + group_cells[1]["solution_points"] = s_seq if has_seq else 0 + # Calculate Performance P for MPI based on efficiency and max a_mpi + 
mpi_eff = group_cells[0].get("efficiency", "N/A") + perf_points_mpi = ( + _calc_perf_points_from_efficiency(mpi_eff, a_mpi) + if (has_mpi and has_seq) + else 0 + ) + # Display '—' instead of 0 when metrics are absent (efficiency not a percent) + if isinstance(mpi_eff, str) and mpi_eff.endswith("%"): + perf_points_mpi_display = perf_points_mpi + else: + perf_points_mpi_display = "—" + group_cells[0]["perf_points"] = perf_points_mpi + group_cells[0]["perf_points_display"] = perf_points_mpi_display + group_cells[1]["perf_points"] = 0 + # Recompute plagiarism penalty based on processes S maxima + try: + plag_coeff = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff = 0.0 + p_mpi = ( + -plag_coeff * s_mpi + if (has_mpi and group_cells[0].get("plagiarised")) + else 0 + ) + p_seq = ( + -plag_coeff * s_seq + if (has_seq and group_cells[1].get("plagiarised")) + else 0 + ) + group_cells[0]["plagiarism_points"] = p_mpi + group_cells[1]["plagiarism_points"] = p_seq + proc_groups.extend(group_cells) + # Sum points S + P + R + C (penalty negative) with gating + s_inc = (s_mpi if has_mpi else 0) + (s_seq if has_seq else 0) + p_inc = perf_points_mpi + r_inc = r_max if report_present else 0 + total_points_sum += s_inc + p_inc + r_inc + p_mpi + p_seq + proc_r_values.append(r_inc) else: proc_group_headers.append({"type": "mpi", "task_label": f"task_{n}"}) proc_group_headers.append({"type": "seq", "task_label": f"task_{n}"}) @@ -453,7 +855,8 @@ def _build_cell(dir_name: str, ttype: str): "plagiarism_points": "?", } ) - # Do not affect total; sum only existing tasks + # Do not affect total; sum only existing tasks; report points 0 + proc_r_values.append(0) # Label for processes row: show Last, First, Middle on separate lines; no group number row_label = "processes" @@ -466,30 +869,35 @@ def _build_cell(dir_name: str, ttype: str): name = "
".join(name_parts) row_label = name or row_label - # Choose variant from the first available task (1..3) - def _load_variant(dir_name: str): - import json - - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return "?" - try: - with open(info_path, "r") as f: - data = json.load(f) - return str(data.get("student", {}).get("variant_number", "?")) - except Exception: - return "?" - - for n in expected_numbers: - ent = num_to_dir.get(n) - if ent: - row_variant = _load_variant(ent[0]) - break + # Build three variants (one per task) based on student identity + row_variant = "?" + if target_identity: + parts = target_identity.split("|") + if len(parts) >= 4: + first, last, middle, group = parts[0], parts[1], parts[2], parts[3] + variants_render = [] + for n in expected_numbers: + vmax = _find_process_variants_max(cfg, n) + try: + v_idx = assign_variant( + surname=last, + name=first, + patronymic=middle, + group=group, + repo=f"{REPO_SALT}/processes/task-{n}", + num_variants=vmax, + ) + variants_render.append(str(v_idx + 1)) + except Exception: + variants_render.append("?") + row_variant = "
".join(variants_render) processes_rows = [ { "task": row_label, "variant": row_variant, "groups": proc_groups, + "r_values": proc_r_values, + "r_total": sum(proc_r_values), "total": total_points_sum, } ] @@ -504,16 +912,79 @@ def _load_variant(dir_name: str): output_path.mkdir(parents=True, exist_ok=True) # Render tables + generated_msk = _now_msk().strftime("%Y-%m-%d %H:%M:%S") table_template = env.get_template("index.html.j2") + threads_vmax = int((cfg.get("threads", {}) or {}).get("variants_max", 1)) + # Build display deadlines (use file values if present, fill missing with auto) + threads_order = task_types_threads + auto_threads_dl = _compute_display_deadlines_threads(threads_order) + dl_threads_out = {} + for t in threads_order: + base_date = auto_threads_dl.get(t) + # Default = 0 shift + shift_days = 0 + label = None + if deadlines_display_threads and t in deadlines_display_threads: + val = deadlines_display_threads.get(t) + if isinstance(val, int): + shift_days = val + else: + # try int-like string, else treat as explicit label + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + dl_threads_out[t] = _abbr(vdate) + else: + dl_threads_out[t] = label or "" + threads_html = table_template.render( - task_types=task_types_threads, rows=threads_rows + task_types=task_types_threads, + rows=threads_rows, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + threads_variants_max=threads_vmax, + deadlines_threads=dl_threads_out, ) # Use dedicated template for processes table layout processes_template = env.get_template("processes.html.j2") + proc_vmaxes = [_find_process_variants_max(cfg, n) for n in expected_numbers] + # Build display deadlines for processes in task order (1..3) + auto_proc_dl = _compute_display_deadlines_processes(len(expected_numbers)) + proc_deadlines_list: list[str] = [] + for i, n in enumerate(expected_numbers): + base_date = auto_proc_dl[i] + shift_days = 0 + label = None + if deadlines_display_processes: + key = f"task_{n}" + val = deadlines_display_processes.get( + key + ) or deadlines_display_processes.get(f"mpi_task_{n}") + if val is not None: + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + proc_deadlines_list.append(_abbr(vdate)) + else: + proc_deadlines_list.append(label or "") + processes_html = processes_template.render( top_task_names=proc_top_headers, group_headers=proc_group_headers, rows=processes_rows, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + processes_variants_max=proc_vmaxes, + deadlines_processes=proc_deadlines_list, ) with open(output_path / "threads.html", "w") as f: @@ -562,7 +1033,36 @@ def _slugify(text: str) -> str: eff_num_proc, deadlines_cfg, ) - html_g = table_template.render(task_types=task_types_threads, rows=rows_g) + # Rebuild deadline labels for this page + auto_threads_dl_g = _compute_display_deadlines_threads(threads_order) + dl_threads_out_g = {} + for t in threads_order: + base_date = auto_threads_dl_g.get(t) + shift_days = 0 + label = None + if deadlines_display_threads and t in deadlines_display_threads: + val = deadlines_display_threads.get(t) + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None 
and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + dl_threads_out_g[t] = _abbr(vdate) + else: + dl_threads_out_g[t] = label or "" + + html_g = table_template.render( + task_types=task_types_threads, + rows=rows_g, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + threads_variants_max=threads_vmax, + deadlines_threads=dl_threads_out_g, + ) with open(out_file, "w") as f: f.write(html_g) threads_groups_menu.append({"href": out_file.name, "title": g}) @@ -626,6 +1126,7 @@ def _id_key(stud: dict) -> str: proc_top_headers_g = [] proc_group_headers_g = [] proc_groups_g = [] + proc_r_values_g = [] total_points_sum_g = 0 for n in [1, 2, 3]: entry = num_to_dir_g.get(n) @@ -642,7 +1143,12 @@ def _id_key(stud: dict) -> str: task_points = sol_points is_cheated, plagiarism_points = ( check_plagiarism_and_calculate_penalty( - d, ttype, sol_points, plagiarism_cfg, cfg + d, + ttype, + sol_points, + plagiarism_cfg, + cfg, + semester="processes", ) ) task_points += plagiarism_points @@ -665,7 +1171,65 @@ def _id_key(stud: dict) -> str: "plagiarism_points": plagiarism_points, } ) - total_points_sum_g += task_points + # Override displayed points to processes maxima and recompute P + s_mpi_g, s_seq_g, a_max_g, r_max_g = _find_process_points(cfg, n) + has_mpi_g = bool(directories[d].get("mpi")) + has_seq_g = bool(directories[d].get("seq")) + report_present_g = (tasks_dir / d / "report.md").exists() + base_idx = len(proc_groups_g) - 2 + if base_idx >= 0: + proc_groups_g[base_idx]["solution_points"] = ( + s_mpi_g if has_mpi_g else 0 + ) + proc_groups_g[base_idx + 1]["solution_points"] = ( + s_seq_g if has_seq_g else 0 + ) + # Performance for MPI cell + mpi_eff_g = proc_groups_g[base_idx].get("efficiency", "N/A") + perf_points_mpi_g = ( + _calc_perf_points_from_efficiency(mpi_eff_g, a_max_g) + if (has_mpi_g and has_seq_g) + else 0 + ) + if isinstance(mpi_eff_g, str) and mpi_eff_g.endswith("%"): + perf_points_mpi_display_g = perf_points_mpi_g + else: + perf_points_mpi_display_g = "—" + proc_groups_g[base_idx]["perf_points"] = perf_points_mpi_g + proc_groups_g[base_idx]["perf_points_display"] = ( + perf_points_mpi_display_g + ) + proc_groups_g[base_idx + 1]["perf_points"] = 0 + try: + plag_coeff_g = float( + (cfg.get("copying", {}) or cfg.get("plagiarism", {})).get( + "coefficient", 0.0 + ) + ) + except Exception: + plag_coeff_g = 0.0 + p_mpi_g = ( + -plag_coeff_g * s_mpi_g + if (has_mpi_g and proc_groups_g[base_idx].get("plagiarised")) + else 0 + ) + p_seq_g = ( + -plag_coeff_g * s_seq_g + if ( + has_seq_g and proc_groups_g[base_idx + 1].get("plagiarised") + ) + else 0 + ) + proc_groups_g[base_idx]["plagiarism_points"] = p_mpi_g + proc_groups_g[base_idx + 1]["plagiarism_points"] = p_seq_g + + # Sum points by processes S + P + R (and C penalties) + s_inc_g = (s_mpi_g if has_mpi_g else 0) + (s_seq_g if has_seq_g else 0) + r_inc_g = r_max_g if report_present_g else 0 + total_points_sum_g += ( + s_inc_g + perf_points_mpi_g + r_inc_g + p_mpi_g + p_seq_g + ) + proc_r_values_g.append(r_inc_g) else: proc_top_headers_g.append(f"task-{n}") for ttype in ["mpi", "seq"]: @@ -682,7 +1246,8 @@ def _id_key(stud: dict) -> str: "plagiarism_points": "?", } ) - # Missing task: do not affect total; sum only existing + # Missing task: do not affect total; sum only existing; report=0 + proc_r_values_g.append(0) # Row label for group page: name without group (three lines max) row_label_g = f"group {g}" @@ -694,40 +1259,75 @@ def _id_key(stud: dict) -> str: nm = "
".join(nm_parts) row_label_g = nm or row_label_g - # Variant for group row - def _load_variant_g(dir_name: str): - import json - - info_path = tasks_dir / dir_name / "info.json" - if not info_path.exists(): - return "?" - try: - with open(info_path, "r") as f: - data = json.load(f) - return str(data.get("student", {}).get("variant_number", "?")) - except Exception: - return "?" - + # Build three variants (one per task) based on student identity row_variant_g = "?" - for n in [1, 2, 3]: - entry2 = num_to_dir_g.get(n) - if entry2: - row_variant_g = _load_variant_g(entry2[0]) - break + if target_identity_g: + parts = target_identity_g.split("|") + if len(parts) >= 4: + first, last, middle, group = parts[0], parts[1], parts[2], parts[3] + vrender = [] + for n in [1, 2, 3]: + vmax = _find_process_variants_max(cfg, n) + try: + v_idx = assign_variant( + surname=last, + name=first, + patronymic=middle, + group=group, + repo=f"{REPO_SALT}/processes/task-{n}", + num_variants=vmax, + ) + vrender.append(str(v_idx + 1)) + except Exception: + vrender.append("?") + row_variant_g = "
".join(vrender) rows_g = [ { "task": row_label_g, "variant": row_variant_g, "groups": proc_groups_g, + "r_values": proc_r_values_g, + "r_total": sum(proc_r_values_g), "total": total_points_sum_g, } ] + proc_vmaxes_g = [_find_process_variants_max(cfg, n) for n in [1, 2, 3]] + # Build display deadlines for processes group page + auto_proc_dl_g = _compute_display_deadlines_processes(3) + proc_deadlines_list_g: list[str] = [] + for i, n in enumerate([1, 2, 3]): + base_date = auto_proc_dl_g[i] + shift_days = 0 + label = None + if deadlines_display_processes: + key = f"task_{n}" + val = deadlines_display_processes.get( + key + ) or deadlines_display_processes.get(f"mpi_task_{n}") + if val is not None: + if isinstance(val, int): + shift_days = val + else: + try: + shift_days = int(str(val).strip()) + except Exception: + label = str(val) + if label is None and isinstance(base_date, date): + vdate = base_date + timedelta(days=shift_days) + proc_deadlines_list_g.append(_abbr(vdate)) + else: + proc_deadlines_list_g.append(label or "") + html_g = processes_template.render( top_task_names=proc_top_headers_g, group_headers=proc_group_headers_g, rows=rows_g, + generated_msk=generated_msk, + repo_salt=REPO_SALT, + processes_variants_max=proc_vmaxes_g, + deadlines_processes=proc_deadlines_list_g, ) with open(out_file, "w") as f: f.write(html_g) @@ -755,6 +1355,7 @@ def _load_variant_g(dir_name: str): ], groups_threads=threads_groups_menu, groups_processes=processes_groups_menu, + generated_msk=generated_msk, ) with open(output_path / "index.html", "w") as f: diff --git a/scoreboard/requirements.txt b/scoreboard/requirements.txt index e3a1fcb25..d86cf7650 100644 --- a/scoreboard/requirements.txt +++ b/scoreboard/requirements.txt @@ -1 +1,3 @@ Jinja2>=3.0 +PyYAML>=6.0 +backports.zoneinfo; python_version < "3.9" diff --git a/scoreboard/templates/index.html.j2 b/scoreboard/templates/index.html.j2 index 534c6b551..37151c30c 100644 --- a/scoreboard/templates/index.html.j2 +++ b/scoreboard/templates/index.html.j2 @@ -5,33 +5,109 @@ - +
+ Generated (MSK): {{ generated_msk }} +
+
+
Variant Calculator (Threads)
+
+ + + + + + +
+ +
{% for type in task_types %} - + {% set span = 4 if type == 'seq' else 7 %} + {% endfor %} {% for type in task_types %} - {% for letter in ('S', 'A', 'E', 'D', 'P') %} - - {% endfor %} + {% if type == 'seq' %} + {% for letter in ('S', 'D', 'C', 'R') %} + + {% endfor %} + {% else %} + {% for letter in ('S', 'P', 'A', 'E', 'D', 'C', 'R') %} + + {% endfor %} + {% endif %} {% endfor %} {% for row in rows %} - {% for cell in row.types %} - - - - - + {% for type in task_types %} + {% set cell = row.types[loop.index0] %} + {% if type == 'seq' %} + + + + + {% else %} + + + + + + + + {% endif %} {% endfor %} diff --git a/scoreboard/templates/menu_index.html.j2 b/scoreboard/templates/menu_index.html.j2 index ef04119b8..73d125e98 100644 --- a/scoreboard/templates/menu_index.html.j2 +++ b/scoreboard/templates/menu_index.html.j2 @@ -30,7 +30,7 @@ } box.innerHTML = html || 'No groups'; } - window.addEventListener('DOMContentLoaded', () => setGroups('threads')); + window.addEventListener('DOMContentLoaded', () => setGroups('processes')); @@ -43,12 +43,14 @@

(V)ariant - Task variant number assigned to the student.
+ (R)eport - Task report in Markdown (.md), required.
(S)olution - The correctness and completeness of the implemented solution.
(A)cceleration - Measured speedup of the parallel run over the sequential baseline: Speedup = T(seq) / T(parallel)<br>
(E)fficiency - How well the achieved speedup uses the available processors: Efficiency = Speedup / NumProcs * 100%<br>
- (D)eadline - The timeliness of the submission in relation to the given deadline.
- (P)lagiarism - The originality of the work, ensuring no copied content from external sources.<br>
+ (P)erformance - Points awarded based on efficiency thresholds (see docs).<br>
+ (D)eadline - The timeliness of the submission in relation to the given deadline (due at 23:59 MSK on the shown date).
+ (C)opying - Penalty for detected copying cases.
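The new (P)erformance legend entry is backed by _calc_perf_points_from_efficiency in the scoreboard script; only its call site is visible in this diff. A minimal sketch of the idea, assuming a hypothetical threshold table (the actual bands live in the course config/docs, not in this patch):

    # Sketch only: maps an efficiency string such as "85%" (or "N/A") plus the
    # per-task maximum onto points. The threshold bands here are assumptions.
    def calc_perf_points_from_efficiency(efficiency, max_points):
        if not (isinstance(efficiency, str) and efficiency.endswith("%")):
            return 0.0  # "N/A" and missing values earn nothing
        try:
            eff = float(efficiency.rstrip("%"))
        except ValueError:
            return 0.0
        for bound, share in ((90.0, 1.0), (75.0, 0.75), (50.0, 0.5), (25.0, 0.25)):
            if eff >= bound:
                return max_points * share
        return 0.0

Under these assumed bands, calc_perf_points_from_efficiency("85%", 8) yields 6.0.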

- + diff --git a/scoreboard/templates/processes.html.j2 b/scoreboard/templates/processes.html.j2 index 0898dff5f..c39cf32e7 100644 --- a/scoreboard/templates/processes.html.j2 +++ b/scoreboard/templates/processes.html.j2 @@ -5,23 +5,97 @@ +
+ Generated (MSK): {{ generated_msk }} +
+
+
Variant Calculator (Processes)
+
+ + + + + + +
+ +
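The calculator widget above mirrors the server-side assign_variant call made while rendering group pages (see the Python hunk earlier in this diff): the variant is derived deterministically from the student identity fields plus a salted repo string, taken modulo the task's variant count, and displayed 1-based (v_idx + 1). A minimal sketch, assuming a sha256 hash (the real scheme lives in the scoreboard package):

    # Sketch of a stable variant assignment matching the call signature used in
    # this diff; sha256 and the key format are assumptions for illustration.
    import hashlib

    def assign_variant(surname, name, patronymic, group, repo, num_variants):
        key = "|".join((surname, name, patronymic, group, repo)).lower()
        digest = hashlib.sha256(key.encode("utf-8")).hexdigest()
        return int(digest, 16) % num_variants  # 0-based; templates render +1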
Name V{{ type }} +
+ {{ type }} + {% if deadlines_threads %} + {{ deadlines_threads.get(type, '') }} + {% endif %} +
+
Total
{{ letter }}{{ letter }}{{ letter }}
{{ row.task }} {{ row.variant }}{{ cell.solution_points }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.solution_points }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ cell.solution_points }}{{ cell.perf_points_display }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell.report }}{{ row.total }}
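The deadlines_threads values rendered above are built in the scoreboard script (Python hunks earlier in this diff): a per-task override that parses as an integer shifts the auto-computed deadline by that many days, any other value is displayed verbatim as a label, and no override shows the auto date. A condensed restatement of that logic (the _abbr date formatter is not shown in the hunk, so a stand-in format is assumed):

    from datetime import date, timedelta

    # Condensed from the scoreboard script: int-like override => day shift,
    # other string => literal label, no override => auto-computed date.
    def display_deadline(base_date, override, abbr=lambda d: d.strftime("%d %b")):
        shift_days, label = 0, None
        if override is not None:
            if isinstance(override, int):
                shift_days = override
            else:
                try:
                    shift_days = int(str(override).strip())
                except ValueError:
                    label = str(override)
        if label is None and isinstance(base_date, date):
            return abbr(base_date + timedelta(days=shift_days))
        return label or ""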
{% for name in top_task_names %} - + {# For each task: seq (3) + mpi (6) + R (1) = 10 #} + {% endfor %} - {% for header in group_headers %} - + {% for _ in top_task_names %} + + + {% endfor %} - {% for _ in group_headers %} - {% for letter in ('S', 'A', 'E', 'D', 'P') %} + {% for _ in top_task_names %} + {# seq sub-columns (no A/E) #} + {% for letter in ('S', 'D', 'C') %} + + {% endfor %} + {# mpi sub-columns include P (points) + A/E metrics #} + {% for letter in ('S', 'P', 'A', 'E', 'D', 'C') %} {% endfor %} {% endfor %} @@ -30,12 +104,26 @@ - {% for cell in row.groups %} - - - - - + {% set ns = namespace(idx=0, gi=0) %} + {% for _ in top_task_names %} + {# cells are stored as [mpi, seq] per task; render seq first #} + {% set cell_mpi = row.groups[ns.idx] %} + {% set cell_seq = row.groups[ns.idx + 1] %} + {# seq: S, D, P #} + + + + {# mpi: S, P, A, E, D, C #} + + + + + + + {% set ns.idx = ns.idx + 2 %} + {# R value for this task group #} + + {% set ns.gi = ns.gi + 1 %} {% endfor %} diff --git a/scripts/create_perf_table.py b/scripts/create_perf_table.py index 92fd0b6a3..0e5a67f60 100644 --- a/scripts/create_perf_table.py +++ b/scripts/create_perf_table.py @@ -74,8 +74,8 @@ task_name = old_result[0][1] perf_type = old_result[0][2] perf_time = float(old_result[0][3]) - if perf_time < 0.1: - msg = f"Performance time = {perf_time} < 0.1 second : for {task_type} - {task_name} - {perf_type} \n" + if perf_time < 0.001: + msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" raise Exception(msg) result_tables[perf_type][task_name][task_type] = perf_time elif len(new_result): @@ -87,8 +87,8 @@ perf_time = float(new_result[0][4]) task_name = f"example_{task_category}" - if perf_time < 0.1: - msg = f"Performance time = {perf_time} < 0.1 second : for {task_type} - {task_name} - {perf_type} \n" + if perf_time < 0.001: + msg = f"Performance time = {perf_time} < 0.001 second : for {task_type} - {task_name} - {perf_type} \n" raise Exception(msg) if task_name in result_tables[perf_type]: diff --git a/tasks/common/runners/functional.cpp b/tasks/common/runners/functional.cpp index 7eb563375..c32e6e9d1 100644 --- a/tasks/common/runners/functional.cpp +++ b/tasks/common/runners/functional.cpp @@ -4,7 +4,7 @@ #include "runners/include/runners.hpp" #include "util/include/util.hpp" -int main(int argc, char** argv) { +int main(int argc, char **argv) { if (ppc::util::IsUnderMpirun()) { return ppc::runners::Init(argc, argv); } diff --git a/tasks/common/runners/performance.cpp b/tasks/common/runners/performance.cpp index 3fd54c746..a4b6c0e2f 100644 --- a/tasks/common/runners/performance.cpp +++ b/tasks/common/runners/performance.cpp @@ -1,5 +1,5 @@ #include "runners/include/runners.hpp" -int main(int argc, char** argv) { +int main(int argc, char **argv) { return ppc::runners::Init(argc, argv); } diff --git a/tasks/example_processes/info.json b/tasks/example_processes/info.json index 513436aac..de9442bb9 100644 --- a/tasks/example_processes/info.json +++ b/tasks/example_processes/info.json @@ -4,7 +4,6 @@ "last_name": "last_name_p", "middle_name": "middle_name_p", "group_number": "2222222_p", - "task_number": "1", - "variant_number": "23" + "task_number": "1" } } diff --git a/tasks/example_processes/report.md b/tasks/example_processes/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_processes/seq/include/ops_seq.hpp b/tasks/example_processes/seq/include/ops_seq.hpp index 6a1cdd4ce..f264b4fd7 100644 --- 
a/tasks/example_processes/seq/include/ops_seq.hpp +++ b/tasks/example_processes/seq/include/ops_seq.hpp @@ -10,7 +10,7 @@ class NesterovATestTaskSEQ : public BaseTask { static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { return ppc::task::TypeOfTask::kSEQ; } - explicit NesterovATestTaskSEQ(const InType& in); + explicit NesterovATestTaskSEQ(const InType &in); private: bool ValidationImpl() override; diff --git a/tasks/example_processes/seq/src/ops_seq.cpp b/tasks/example_processes/seq/src/ops_seq.cpp index 9b0b7b582..2599c518e 100644 --- a/tasks/example_processes/seq/src/ops_seq.cpp +++ b/tasks/example_processes/seq/src/ops_seq.cpp @@ -8,7 +8,7 @@ namespace nesterov_a_test_task_processes { -NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType& in) { +NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType &in) { SetTypeOfTask(GetStaticTypeOfTask()); GetInput() = in; GetOutput() = 0; diff --git a/tasks/example_processes/tests/performance/main.cpp b/tasks/example_processes/tests/performance/main.cpp index 7946e1bf5..5d9a4c712 100644 --- a/tasks/example_processes/tests/performance/main.cpp +++ b/tasks/example_processes/tests/performance/main.cpp @@ -8,14 +8,14 @@ namespace nesterov_a_test_task_processes { class ExampleRunPerfTestProcesses : public ppc::util::BaseRunPerfTests { - const int kCount_ = 200; + const int kCount_ = 100; InType input_data_{}; void SetUp() override { input_data_ = kCount_; } - bool CheckTestOutputData(OutType& output_data) final { + bool CheckTestOutputData(OutType &output_data) final { return input_data_ == output_data; } diff --git a/tasks/example_processes_2/common/include/common.hpp b/tasks/example_processes_2/common/include/common.hpp new file mode 100644 index 000000000..145054df0 --- /dev/null +++ b/tasks/example_processes_2/common/include/common.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_2 { + +using InType = int; +using OutType = int; +using TestType = std::tuple; +using BaseTask = ppc::task::Task; + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_2/data/pic.jpg b/tasks/example_processes_2/data/pic.jpg new file mode 100644 index 000000000..344580234 Binary files /dev/null and b/tasks/example_processes_2/data/pic.jpg differ diff --git a/tasks/example_processes_2/info.json b/tasks/example_processes_2/info.json new file mode 100644 index 000000000..6109c2baf --- /dev/null +++ b/tasks/example_processes_2/info.json @@ -0,0 +1,9 @@ +{ + "student": { + "first_name": "first_name_p", + "last_name": "last_name_p", + "middle_name": "middle_name_p", + "group_number": "2222222_p", + "task_number": "2" + } +} diff --git a/tasks/example_processes_2/mpi/include/ops_mpi.hpp b/tasks/example_processes_2/mpi/include/ops_mpi.hpp new file mode 100644 index 000000000..febfb30ee --- /dev/null +++ b/tasks/example_processes_2/mpi/include/ops_mpi.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "example_processes_2/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_2 { + +class NesterovATestTaskMPI : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kMPI; + } + explicit NesterovATestTaskMPI(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool PostProcessingImpl() override; +}; + +} // namespace nesterov_a_test_task_processes_2 diff 
--git a/tasks/example_processes_2/mpi/src/ops_mpi.cpp b/tasks/example_processes_2/mpi/src/ops_mpi.cpp new file mode 100644 index 000000000..d3570f350 --- /dev/null +++ b/tasks/example_processes_2/mpi/src/ops_mpi.cpp @@ -0,0 +1,72 @@ +#include "example_processes_2/mpi/include/ops_mpi.hpp" + +#include + +#include +#include + +#include "example_processes_2/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_2 { + +NesterovATestTaskMPI::NesterovATestTaskMPI(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool NesterovATestTaskMPI::ValidationImpl() { + return (GetInput() > 0) && (GetOutput() == 0); +} + +bool NesterovATestTaskMPI::PreProcessingImpl() { + GetOutput() = 2 * GetInput(); + return GetOutput() > 0; +} + +bool NesterovATestTaskMPI::RunImpl() { + auto input = GetInput(); + if (input == 0) { + return false; + } + + for (InType i = 0; i < GetInput(); i++) { + for (InType j = 0; j < GetInput(); j++) { + for (InType k = 0; k < GetInput(); k++) { + std::vector tmp(i + j + k, 1); + GetOutput() += std::accumulate(tmp.begin(), tmp.end(), 0); + GetOutput() -= i + j + k; + } + } + } + + const int num_threads = ppc::util::GetNumThreads(); + GetOutput() *= num_threads; + + int rank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == 0) { + GetOutput() /= num_threads; + } else { + int counter = 0; + for (int i = 0; i < num_threads; i++) { + counter++; + } + + if (counter != 0) { + GetOutput() /= counter; + } + } + + MPI_Barrier(MPI_COMM_WORLD); + return GetOutput() > 0; +} + +bool NesterovATestTaskMPI::PostProcessingImpl() { + GetOutput() -= GetInput(); + return GetOutput() > 0; +} + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_2/report.md b/tasks/example_processes_2/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_processes_2/seq/include/ops_seq.hpp b/tasks/example_processes_2/seq/include/ops_seq.hpp new file mode 100644 index 000000000..ac1ad6944 --- /dev/null +++ b/tasks/example_processes_2/seq/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "example_processes_2/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_2 { + +class NesterovATestTaskSEQ : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kSEQ; + } + explicit NesterovATestTaskSEQ(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool PostProcessingImpl() override; +}; + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_2/seq/src/ops_seq.cpp b/tasks/example_processes_2/seq/src/ops_seq.cpp new file mode 100644 index 000000000..ea4a0c629 --- /dev/null +++ b/tasks/example_processes_2/seq/src/ops_seq.cpp @@ -0,0 +1,60 @@ +#include "example_processes_2/seq/include/ops_seq.hpp" + +#include +#include + +#include "example_processes_2/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_2 { + +NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool NesterovATestTaskSEQ::ValidationImpl() { + return (GetInput() > 0) && (GetOutput() == 0); +} + +bool NesterovATestTaskSEQ::PreProcessingImpl() { + GetOutput() = 2 * GetInput(); + return GetOutput() > 0; +} + 
+bool NesterovATestTaskSEQ::RunImpl() { + if (GetInput() == 0) { + return false; + } + + for (InType i = 0; i < GetInput(); i++) { + for (InType j = 0; j < GetInput(); j++) { + for (InType k = 0; k < GetInput(); k++) { + std::vector tmp(i + j + k, 1); + GetOutput() += std::accumulate(tmp.begin(), tmp.end(), 0); + GetOutput() -= i + j + k; + } + } + } + + const int num_threads = ppc::util::GetNumThreads(); + GetOutput() *= num_threads; + + int counter = 0; + for (int i = 0; i < num_threads; i++) { + counter++; + } + + if (counter != 0) { + GetOutput() /= counter; + } + return GetOutput() > 0; +} + +bool NesterovATestTaskSEQ::PostProcessingImpl() { + GetOutput() -= GetInput(); + return GetOutput() > 0; +} + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_2/settings.json b/tasks/example_processes_2/settings.json new file mode 100644 index 000000000..b1a0d5257 --- /dev/null +++ b/tasks/example_processes_2/settings.json @@ -0,0 +1,7 @@ +{ + "tasks_type": "processes", + "tasks": { + "mpi": "enabled", + "seq": "enabled" + } +} diff --git a/tasks/example_processes_2/tests/.clang-tidy b/tasks/example_processes_2/tests/.clang-tidy new file mode 100644 index 000000000..ef43b7aa8 --- /dev/null +++ b/tasks/example_processes_2/tests/.clang-tidy @@ -0,0 +1,13 @@ +InheritParentConfig: true + +Checks: > + -modernize-loop-convert, + -cppcoreguidelines-avoid-goto, + -cppcoreguidelines-avoid-non-const-global-variables, + -misc-use-anonymous-namespace, + -modernize-use-std-print, + -modernize-type-traits + +CheckOptions: + - key: readability-function-cognitive-complexity.Threshold + value: 50 # Relaxed for tests diff --git a/tasks/example_processes_2/tests/functional/main.cpp b/tasks/example_processes_2/tests/functional/main.cpp new file mode 100644 index 000000000..ff3442fda --- /dev/null +++ b/tasks/example_processes_2/tests/functional/main.cpp @@ -0,0 +1,85 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "example_processes_2/common/include/common.hpp" +#include "example_processes_2/mpi/include/ops_mpi.hpp" +#include "example_processes_2/seq/include/ops_seq.hpp" +#include "util/include/func_test_util.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_2 { + +class NesterovARunFuncTestsProcesses2 : public ppc::util::BaseRunFuncTests { + public: + static std::string PrintTestParam(const TestType &test_param) { + return std::to_string(std::get<0>(test_param)) + "_" + std::get<1>(test_param); + } + + protected: + void SetUp() override { + int width = -1; + int height = -1; + int channels = -1; + std::vector img; + // Read image + { + std::string abs_path = ppc::util::GetAbsoluteTaskPath(PPC_ID_example_processes_2, "pic.jpg"); + auto *data = stbi_load(abs_path.c_str(), &width, &height, &channels, 0); + if (data == nullptr) { + throw std::runtime_error("Failed to load image: " + std::string(stbi_failure_reason())); + } + img = std::vector(data, data + (static_cast(width * height * channels))); + stbi_image_free(data); + if (std::cmp_not_equal(width, height)) { + throw std::runtime_error("width != height: "); + } + } + + TestType params = std::get(ppc::util::GTestParamIndex::kTestParams)>(GetParam()); + input_data_ = width - height + std::min(std::accumulate(img.begin(), img.end(), 0), channels); + } + + bool CheckTestOutputData(OutType &output_data) final { + return (input_data_ == output_data); + } + + InType GetTestInputData() final { + return input_data_; 
+ } + + private: + InType input_data_ = 0; +}; + +namespace { + +TEST_P(NesterovARunFuncTestsProcesses2, MatmulFromPic) { + ExecuteTest(GetParam()); +} + +const std::array kTestParam = {std::make_tuple(3, "3"), std::make_tuple(5, "5"), std::make_tuple(7, "7")}; + +const auto kTestTasksList = + std::tuple_cat(ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_example_processes_2), + ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_example_processes_2)); + +const auto kGtestValues = ppc::util::ExpandToValues(kTestTasksList); + +const auto kPerfTestName = NesterovARunFuncTestsProcesses2::PrintFuncTestName; + +INSTANTIATE_TEST_SUITE_P(PicMatrixTests, NesterovARunFuncTestsProcesses2, kGtestValues, kPerfTestName); + +} // namespace + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_2/tests/performance/main.cpp b/tasks/example_processes_2/tests/performance/main.cpp new file mode 100644 index 000000000..23036173c --- /dev/null +++ b/tasks/example_processes_2/tests/performance/main.cpp @@ -0,0 +1,40 @@ +#include + +#include "example_processes_2/common/include/common.hpp" +#include "example_processes_2/mpi/include/ops_mpi.hpp" +#include "example_processes_2/seq/include/ops_seq.hpp" +#include "util/include/perf_test_util.hpp" + +namespace nesterov_a_test_task_processes_2 { + +class ExampleRunPerfTestProcesses2 : public ppc::util::BaseRunPerfTests { + const int kCount_ = 100; + InType input_data_{}; + + void SetUp() override { + input_data_ = kCount_; + } + + bool CheckTestOutputData(OutType &output_data) final { + return input_data_ == output_data; + } + + InType GetTestInputData() final { + return input_data_; + } +}; + +TEST_P(ExampleRunPerfTestProcesses2, RunPerfModes) { + ExecuteTest(GetParam()); +} + +const auto kAllPerfTasks = + ppc::util::MakeAllPerfTasks(PPC_SETTINGS_example_processes_2); + +const auto kGtestValues = ppc::util::TupleToGTestValues(kAllPerfTasks); + +const auto kPerfTestName = ExampleRunPerfTestProcesses2::CustomPerfTestName; + +INSTANTIATE_TEST_SUITE_P(RunModeTests, ExampleRunPerfTestProcesses2, kGtestValues, kPerfTestName); + +} // namespace nesterov_a_test_task_processes_2 diff --git a/tasks/example_processes_3/common/include/common.hpp b/tasks/example_processes_3/common/include/common.hpp new file mode 100644 index 000000000..ac343ab8f --- /dev/null +++ b/tasks/example_processes_3/common/include/common.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_3 { + +using InType = int; +using OutType = int; +using TestType = std::tuple; +using BaseTask = ppc::task::Task; + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/data/pic.jpg b/tasks/example_processes_3/data/pic.jpg new file mode 100644 index 000000000..344580234 Binary files /dev/null and b/tasks/example_processes_3/data/pic.jpg differ diff --git a/tasks/example_processes_3/info.json b/tasks/example_processes_3/info.json new file mode 100644 index 000000000..108fe1f77 --- /dev/null +++ b/tasks/example_processes_3/info.json @@ -0,0 +1,9 @@ +{ + "student": { + "first_name": "first_name_p", + "last_name": "last_name_p", + "middle_name": "middle_name_p", + "group_number": "2222222_p", + "task_number": "3" + } +} diff --git a/tasks/example_processes_3/mpi/include/ops_mpi.hpp b/tasks/example_processes_3/mpi/include/ops_mpi.hpp new file mode 100644 index 000000000..080e96585 --- /dev/null +++ b/tasks/example_processes_3/mpi/include/ops_mpi.hpp @@ -0,0 +1,22 @@ +#pragma once 
+ +#include "example_processes_3/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_3 { + +class NesterovATestTaskMPI : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kMPI; + } + explicit NesterovATestTaskMPI(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool PostProcessingImpl() override; +}; + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/mpi/src/ops_mpi.cpp b/tasks/example_processes_3/mpi/src/ops_mpi.cpp new file mode 100644 index 000000000..7cef9dbb6 --- /dev/null +++ b/tasks/example_processes_3/mpi/src/ops_mpi.cpp @@ -0,0 +1,72 @@ +#include "example_processes_3/mpi/include/ops_mpi.hpp" + +#include + +#include +#include + +#include "example_processes_3/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_3 { + +NesterovATestTaskMPI::NesterovATestTaskMPI(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool NesterovATestTaskMPI::ValidationImpl() { + return (GetInput() > 0) && (GetOutput() == 0); +} + +bool NesterovATestTaskMPI::PreProcessingImpl() { + GetOutput() = 2 * GetInput(); + return GetOutput() > 0; +} + +bool NesterovATestTaskMPI::RunImpl() { + auto input = GetInput(); + if (input == 0) { + return false; + } + + for (InType i = 0; i < GetInput(); i++) { + for (InType j = 0; j < GetInput(); j++) { + for (InType k = 0; k < GetInput(); k++) { + std::vector tmp(i + j + k, 1); + GetOutput() += std::accumulate(tmp.begin(), tmp.end(), 0); + GetOutput() -= i + j + k; + } + } + } + + const int num_threads = ppc::util::GetNumThreads(); + GetOutput() *= num_threads; + + int rank = 0; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + if (rank == 0) { + GetOutput() /= num_threads; + } else { + int counter = 0; + for (int i = 0; i < num_threads; i++) { + counter++; + } + + if (counter != 0) { + GetOutput() /= counter; + } + } + + MPI_Barrier(MPI_COMM_WORLD); + return GetOutput() > 0; +} + +bool NesterovATestTaskMPI::PostProcessingImpl() { + GetOutput() -= GetInput(); + return GetOutput() > 0; +} + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/report.md b/tasks/example_processes_3/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_processes_3/seq/include/ops_seq.hpp b/tasks/example_processes_3/seq/include/ops_seq.hpp new file mode 100644 index 000000000..5a7b33677 --- /dev/null +++ b/tasks/example_processes_3/seq/include/ops_seq.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "example_processes_3/common/include/common.hpp" +#include "task/include/task.hpp" + +namespace nesterov_a_test_task_processes_3 { + +class NesterovATestTaskSEQ : public BaseTask { + public: + static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { + return ppc::task::TypeOfTask::kSEQ; + } + explicit NesterovATestTaskSEQ(const InType &in); + + private: + bool ValidationImpl() override; + bool PreProcessingImpl() override; + bool RunImpl() override; + bool PostProcessingImpl() override; +}; + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/seq/src/ops_seq.cpp b/tasks/example_processes_3/seq/src/ops_seq.cpp new file mode 100644 index 000000000..1db5c7340 --- /dev/null +++ b/tasks/example_processes_3/seq/src/ops_seq.cpp @@ -0,0 +1,60 @@ +#include 
"example_processes_3/seq/include/ops_seq.hpp" + +#include +#include + +#include "example_processes_3/common/include/common.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_3 { + +NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType &in) { + SetTypeOfTask(GetStaticTypeOfTask()); + GetInput() = in; + GetOutput() = 0; +} + +bool NesterovATestTaskSEQ::ValidationImpl() { + return (GetInput() > 0) && (GetOutput() == 0); +} + +bool NesterovATestTaskSEQ::PreProcessingImpl() { + GetOutput() = 2 * GetInput(); + return GetOutput() > 0; +} + +bool NesterovATestTaskSEQ::RunImpl() { + if (GetInput() == 0) { + return false; + } + + for (InType i = 0; i < GetInput(); i++) { + for (InType j = 0; j < GetInput(); j++) { + for (InType k = 0; k < GetInput(); k++) { + std::vector tmp(i + j + k, 1); + GetOutput() += std::accumulate(tmp.begin(), tmp.end(), 0); + GetOutput() -= i + j + k; + } + } + } + + const int num_threads = ppc::util::GetNumThreads(); + GetOutput() *= num_threads; + + int counter = 0; + for (int i = 0; i < num_threads; i++) { + counter++; + } + + if (counter != 0) { + GetOutput() /= counter; + } + return GetOutput() > 0; +} + +bool NesterovATestTaskSEQ::PostProcessingImpl() { + GetOutput() -= GetInput(); + return GetOutput() > 0; +} + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/settings.json b/tasks/example_processes_3/settings.json new file mode 100644 index 000000000..b1a0d5257 --- /dev/null +++ b/tasks/example_processes_3/settings.json @@ -0,0 +1,7 @@ +{ + "tasks_type": "processes", + "tasks": { + "mpi": "enabled", + "seq": "enabled" + } +} diff --git a/tasks/example_processes_3/tests/.clang-tidy b/tasks/example_processes_3/tests/.clang-tidy new file mode 100644 index 000000000..ef43b7aa8 --- /dev/null +++ b/tasks/example_processes_3/tests/.clang-tidy @@ -0,0 +1,13 @@ +InheritParentConfig: true + +Checks: > + -modernize-loop-convert, + -cppcoreguidelines-avoid-goto, + -cppcoreguidelines-avoid-non-const-global-variables, + -misc-use-anonymous-namespace, + -modernize-use-std-print, + -modernize-type-traits + +CheckOptions: + - key: readability-function-cognitive-complexity.Threshold + value: 50 # Relaxed for tests diff --git a/tasks/example_processes_3/tests/functional/main.cpp b/tasks/example_processes_3/tests/functional/main.cpp new file mode 100644 index 000000000..99e5a5b15 --- /dev/null +++ b/tasks/example_processes_3/tests/functional/main.cpp @@ -0,0 +1,85 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "example_processes_3/common/include/common.hpp" +#include "example_processes_3/mpi/include/ops_mpi.hpp" +#include "example_processes_3/seq/include/ops_seq.hpp" +#include "util/include/func_test_util.hpp" +#include "util/include/util.hpp" + +namespace nesterov_a_test_task_processes_3 { + +class NesterovARunFuncTestsProcesses3 : public ppc::util::BaseRunFuncTests { + public: + static std::string PrintTestParam(const TestType &test_param) { + return std::to_string(std::get<0>(test_param)) + "_" + std::get<1>(test_param); + } + + protected: + void SetUp() override { + int width = -1; + int height = -1; + int channels = -1; + std::vector img; + // Read image + { + std::string abs_path = ppc::util::GetAbsoluteTaskPath(PPC_ID_example_processes_3, "pic.jpg"); + auto *data = stbi_load(abs_path.c_str(), &width, &height, &channels, 0); + if (data == nullptr) { + throw std::runtime_error("Failed to load image: " + 
std::string(stbi_failure_reason())); + } + img = std::vector(data, data + (static_cast(width * height * channels))); + stbi_image_free(data); + if (std::cmp_not_equal(width, height)) { + throw std::runtime_error("width != height: "); + } + } + + TestType params = std::get(ppc::util::GTestParamIndex::kTestParams)>(GetParam()); + input_data_ = width - height + std::min(std::accumulate(img.begin(), img.end(), 0), channels); + } + + bool CheckTestOutputData(OutType &output_data) final { + return (input_data_ == output_data); + } + + InType GetTestInputData() final { + return input_data_; + } + + private: + InType input_data_ = 0; +}; + +namespace { + +TEST_P(NesterovARunFuncTestsProcesses3, MatmulFromPic) { + ExecuteTest(GetParam()); +} + +const std::array kTestParam = {std::make_tuple(3, "3"), std::make_tuple(5, "5"), std::make_tuple(7, "7")}; + +const auto kTestTasksList = + std::tuple_cat(ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_example_processes_3), + ppc::util::AddFuncTask(kTestParam, PPC_SETTINGS_example_processes_3)); + +const auto kGtestValues = ppc::util::ExpandToValues(kTestTasksList); + +const auto kPerfTestName = NesterovARunFuncTestsProcesses3::PrintFuncTestName; + +INSTANTIATE_TEST_SUITE_P(PicMatrixTests, NesterovARunFuncTestsProcesses3, kGtestValues, kPerfTestName); + +} // namespace + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_processes_3/tests/performance/main.cpp b/tasks/example_processes_3/tests/performance/main.cpp new file mode 100644 index 000000000..cae4b67d9 --- /dev/null +++ b/tasks/example_processes_3/tests/performance/main.cpp @@ -0,0 +1,40 @@ +#include + +#include "example_processes_3/common/include/common.hpp" +#include "example_processes_3/mpi/include/ops_mpi.hpp" +#include "example_processes_3/seq/include/ops_seq.hpp" +#include "util/include/perf_test_util.hpp" + +namespace nesterov_a_test_task_processes_3 { + +class ExampleRunPerfTestProcesses3 : public ppc::util::BaseRunPerfTests { + const int kCount_ = 100; + InType input_data_{}; + + void SetUp() override { + input_data_ = kCount_; + } + + bool CheckTestOutputData(OutType &output_data) final { + return input_data_ == output_data; + } + + InType GetTestInputData() final { + return input_data_; + } +}; + +TEST_P(ExampleRunPerfTestProcesses3, RunPerfModes) { + ExecuteTest(GetParam()); +} + +const auto kAllPerfTasks = + ppc::util::MakeAllPerfTasks(PPC_SETTINGS_example_processes_3); + +const auto kGtestValues = ppc::util::TupleToGTestValues(kAllPerfTasks); + +const auto kPerfTestName = ExampleRunPerfTestProcesses3::CustomPerfTestName; + +INSTANTIATE_TEST_SUITE_P(RunModeTests, ExampleRunPerfTestProcesses3, kGtestValues, kPerfTestName); + +} // namespace nesterov_a_test_task_processes_3 diff --git a/tasks/example_threads/all/report.md b/tasks/example_threads/all/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_threads/info.json b/tasks/example_threads/info.json index cde6f792c..4df5e8c27 100644 --- a/tasks/example_threads/info.json +++ b/tasks/example_threads/info.json @@ -4,7 +4,6 @@ "last_name": "last_name_t", "middle_name": "middle_name_t", "group_number": "2222222_t", - "task_number": "1", - "variant_number": "23" + "task_number": "1" } } diff --git a/tasks/example_threads/omp/include/ops_omp.hpp b/tasks/example_threads/omp/include/ops_omp.hpp index d1075c941..4cea6b0e5 100644 --- a/tasks/example_threads/omp/include/ops_omp.hpp +++ b/tasks/example_threads/omp/include/ops_omp.hpp @@ -10,7 +10,7 @@ class NesterovATestTaskOMP : public 
BaseTask { static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { return ppc::task::TypeOfTask::kOMP; } - explicit NesterovATestTaskOMP(const InType& in); + explicit NesterovATestTaskOMP(const InType &in); private: bool ValidationImpl() override; diff --git a/tasks/example_threads/omp/report.md b/tasks/example_threads/omp/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_threads/omp/src/ops_omp.cpp b/tasks/example_threads/omp/src/ops_omp.cpp index 3a1dca4cc..63c16a9f6 100644 --- a/tasks/example_threads/omp/src/ops_omp.cpp +++ b/tasks/example_threads/omp/src/ops_omp.cpp @@ -9,7 +9,7 @@ namespace nesterov_a_test_task_threads { -NesterovATestTaskOMP::NesterovATestTaskOMP(const InType& in) { +NesterovATestTaskOMP::NesterovATestTaskOMP(const InType &in) { SetTypeOfTask(GetStaticTypeOfTask()); GetInput() = in; GetOutput() = 0; diff --git a/tasks/example_threads/report.md b/tasks/example_threads/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_threads/seq/include/ops_seq.hpp b/tasks/example_threads/seq/include/ops_seq.hpp index f92cd88f1..a16e3a390 100644 --- a/tasks/example_threads/seq/include/ops_seq.hpp +++ b/tasks/example_threads/seq/include/ops_seq.hpp @@ -10,7 +10,7 @@ class NesterovATestTaskSEQ : public BaseTask { static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { return ppc::task::TypeOfTask::kSEQ; } - explicit NesterovATestTaskSEQ(const InType& in); + explicit NesterovATestTaskSEQ(const InType &in); private: bool ValidationImpl() override; diff --git a/tasks/example_threads/seq/report.md b/tasks/example_threads/seq/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_threads/seq/src/ops_seq.cpp b/tasks/example_threads/seq/src/ops_seq.cpp index 309fb4cec..8888b3258 100644 --- a/tasks/example_threads/seq/src/ops_seq.cpp +++ b/tasks/example_threads/seq/src/ops_seq.cpp @@ -8,7 +8,7 @@ namespace nesterov_a_test_task_threads { -NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType& in) { +NesterovATestTaskSEQ::NesterovATestTaskSEQ(const InType &in) { SetTypeOfTask(GetStaticTypeOfTask()); GetInput() = in; GetOutput() = 0; diff --git a/tasks/example_threads/stl/include/ops_stl.hpp b/tasks/example_threads/stl/include/ops_stl.hpp index 1f38eb90e..f33b8b8be 100644 --- a/tasks/example_threads/stl/include/ops_stl.hpp +++ b/tasks/example_threads/stl/include/ops_stl.hpp @@ -10,7 +10,7 @@ class NesterovATestTaskSTL : public BaseTask { static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { return ppc::task::TypeOfTask::kSTL; } - explicit NesterovATestTaskSTL(const InType& in); + explicit NesterovATestTaskSTL(const InType &in); private: bool ValidationImpl() override; diff --git a/tasks/example_threads/stl/report.md b/tasks/example_threads/stl/report.md new file mode 100644 index 000000000..e69de29bb diff --git a/tasks/example_threads/tbb/include/ops_tbb.hpp b/tasks/example_threads/tbb/include/ops_tbb.hpp index 0ec735b39..dabe985b4 100644 --- a/tasks/example_threads/tbb/include/ops_tbb.hpp +++ b/tasks/example_threads/tbb/include/ops_tbb.hpp @@ -10,7 +10,7 @@ class NesterovATestTaskTBB : public BaseTask { static constexpr ppc::task::TypeOfTask GetStaticTypeOfTask() { return ppc::task::TypeOfTask::kTBB; } - explicit NesterovATestTaskTBB(const InType& in); + explicit NesterovATestTaskTBB(const InType &in); private: bool ValidationImpl() override; diff --git a/tasks/example_threads/tbb/report.md b/tasks/example_threads/tbb/report.md new file mode 100644 index 
000000000..e69de29bb diff --git a/tasks/example_threads/tests/performance/main.cpp b/tasks/example_threads/tests/performance/main.cpp index 32d70fd25..c59e81742 100644 --- a/tasks/example_threads/tests/performance/main.cpp +++ b/tasks/example_threads/tests/performance/main.cpp @@ -18,7 +18,7 @@ class ExampleRunPerfTestThreads : public ppc::util::BaseRunPerfTests
Name V{{ name }} +
+ {{ name }} + {% if deadlines_processes %} + {{ deadlines_processes[loop.index0] }} + {% endif %} +
+
Total
{{ header.type }}seqmpiR
{{ letter }}{{ letter }}
{{ row.task }} {{ row.variant }}{{ cell.solution_points }}{{ cell.acceleration }}{{ cell.efficiency }}{{ cell.deadline_points }}{{ cell.plagiarism_points }}{{ cell_seq.solution_points }}{{ cell_seq.deadline_points }}{{ cell_seq.plagiarism_points }}{{ cell_mpi.solution_points }}{{ (cell_mpi.perf_points_display if cell_mpi.perf_points_display is defined else cell_mpi.perf_points) }}{{ cell_mpi.acceleration }}{{ cell_mpi.efficiency }}{{ cell_mpi.deadline_points }}{{ cell_mpi.plagiarism_points }}{{ row.r_values[ns.gi] if row.r_values is defined else 0 }}{{ row.total }}
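Taken together, the processes hunks compute a row total as the per-task sum S_seq + S_mpi + P + R + C, where P comes from MPI efficiency (and requires both mpi and seq solutions), R is the report points when report.md exists, and each C is -coefficient * S for a plagiarised cell. A worked micro-example with hypothetical maxima (s_mpi = 10, s_seq = 4, r_max = 3, copying coefficient 0.5) and the perf sketch above:

    # One task: both solutions present, report committed, MPI efficiency 85%
    # (=> p = 6.0 under the assumed bands), MPI cell flagged for copying.
    s_mpi, s_seq, p, r = 10, 4, 6.0, 3
    c_mpi = -0.5 * s_mpi   # copying penalty on the plagiarised MPI cell
    c_seq = 0.0            # seq cell is clean
    total = s_mpi + s_seq + p + r + c_mpi + c_seq   # 18.0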