From 43b0b804e9ef94537fb7450cdac9660a6701da3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Kami=C5=84ski?= Date: Mon, 6 Oct 2025 11:53:23 +0000 Subject: [PATCH 1/5] [Benchmarks] Metadata generator --- devops/scripts/benchmarks/benches/compute.py | 66 +++----------- .../benchmarks/benches/compute_metadata.py | 87 +++++++++++++++++++ devops/scripts/benchmarks/utils/result.py | 4 +- 3 files changed, 102 insertions(+), 55 deletions(-) create mode 100644 devops/scripts/benchmarks/benches/compute_metadata.py diff --git a/devops/scripts/benchmarks/benches/compute.py b/devops/scripts/benchmarks/benches/compute.py index 7041bad3ed689..950321ba594bc 100644 --- a/devops/scripts/benchmarks/benches/compute.py +++ b/devops/scripts/benchmarks/benches/compute.py @@ -16,6 +16,7 @@ from utils.result import BenchmarkMetadata, Result from .base import Benchmark, Suite, TracingType +from .compute_metadata import ComputeMetadataGenerator class RUNTIMES(Enum): @@ -99,61 +100,20 @@ def setup(self) -> None: self.project.build(add_sycl=True) def additional_metadata(self) -> dict[str, BenchmarkMetadata]: - metadata = { - "SinKernelGraph": BenchmarkMetadata( - type="group", - unstable="This benchmark combines both eager and graph execution, and may not be representative of real use cases.", - tags=["submit", "memory", "proxy", "SYCL", "UR", "L0", "graph"], - ), - "FinalizeGraph": BenchmarkMetadata( - type="group", tags=["finalize", "micro", "SYCL", "graph"] - ), - } - - # Add metadata for all SubmitKernel group variants - submit_kernel_metadata = BenchmarkMetadata( - type="group", - notes="Each layer builds on top of the previous layer, adding functionality and overhead.\n" - "The first layer is the Level Zero API, the second is the Unified Runtime API, and the third is the SYCL API.\n" - "The UR v2 adapter noticeably reduces UR layer overhead, also improving SYCL performance.\n" - "Work is ongoing to reduce the overhead of the SYCL API\n", - tags=["submit", "micro", "SYCL", "UR", "L0"], - range_min=0.0, - ) - for order in ["in order", "out of order"]: - for completion in ["", " with completion"]: - for events in ["", " using events"]: - group_name = f"SubmitKernel {order}{completion}{events} long kernel" - metadata[group_name] = copy.deepcopy(submit_kernel_metadata) - metadata[group_name].description = ( - f"Measures CPU time overhead of submitting {order} kernels with longer execution times through different APIs." - ) - # CPU count variants - cpu_count_group = f"{group_name}, CPU count" - metadata[cpu_count_group] = copy.deepcopy(submit_kernel_metadata) - metadata[cpu_count_group].description = ( - f"Measures CPU instruction count overhead of submitting {order} kernels with longer execution times through different APIs." 
- ) - - # Add metadata for all SubmitGraph group variants - submit_graph_metadata = BenchmarkMetadata( - type="group", tags=["submit", "micro", "SYCL", "UR", "L0", "graph"] - ) - for order in ["in order", "out of order"]: - for completion in ["", " with completion"]: - for events in ["", " using events"]: - for num_kernels in self.submit_graph_num_kernels: - for host_tasks in ["", " use host tasks"]: - group_name = f"SubmitGraph {order}{completion}{events}{host_tasks}, {num_kernels} kernels" - metadata[group_name] = copy.deepcopy(submit_graph_metadata) - # CPU count variants - cpu_count_group = f"{group_name}, CPU count" - metadata[cpu_count_group] = copy.deepcopy( - submit_graph_metadata - ) - return metadata + """ + Returns: + Dictionary mapping group names to their metadata + """ + # Generate metadata based on actual benchmark instances + generator = ComputeMetadataGenerator() + benchmarks = self.benchmarks() + return generator.generate_metadata_from_benchmarks(benchmarks) def benchmarks(self) -> list[Benchmark]: + """ + Returns: + List of all possible benchmark instances + """ benches = [] # hand-picked value so that total execution time of the benchmark is diff --git a/devops/scripts/benchmarks/benches/compute_metadata.py b/devops/scripts/benchmarks/benches/compute_metadata.py new file mode 100644 index 0000000000000..445002c40c7c8 --- /dev/null +++ b/devops/scripts/benchmarks/benches/compute_metadata.py @@ -0,0 +1,87 @@ +# Copyright (C) 2025 Intel Corporation +# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions. +# See LICENSE.TXT +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +""" +Metadata generator for Compute Benchmarks. + +This module provides centralized metadata generation for Compute Benchmark groups, +ensuring consistency between benchmark group membership and group metadata definitions. +""" + +from typing import Dict, List + +from utils.result import BenchmarkMetadata + +from .base import Benchmark + + +class ComputeMetadataGenerator: + """ + Generates metadata for Compute Benchmark groups. + + This class keeps the logic for creating group metadata, ensuring that + all possible benchmark group configurations have corresponding metadata entries. + """ + + def __init__(self): + # Base metadata for core groups + self._base_group_metadata = { + "SubmitKernel": { + "description": "Measures CPU time overhead of submitting kernels through different APIs.", + "notes": ( + "Each layer builds on top of the previous layer, adding functionality and overhead.\n" + "The first layer is the Level Zero API, the second is the Unified Runtime API, and the third is the SYCL API.\n" + "The UR v2 adapter noticeably reduces UR layer overhead, also improving SYCL performance.\n" + "Work is ongoing to reduce the overhead of the SYCL API\n" + ), + "tags": ["submit", "micro", "SYCL", "UR", "L0"], + "range_min": 0.0, + }, + "SinKernelGraph": { + "unstable": "This benchmark combines both eager and graph execution, and may not be representative of real use cases.", + "tags": ["submit", "memory", "proxy", "SYCL", "UR", "L0", "graph"], + }, + "SubmitGraph": {"tags": ["submit", "micro", "SYCL", "UR", "L0", "graph"]}, + "FinalizeGraph": {"tags": ["finalize", "micro", "SYCL", "graph"]}, + } + + def generate_metadata_from_benchmarks( + self, benchmarks: List[Benchmark] + ) -> Dict[str, BenchmarkMetadata]: + """ + Generate group metadata based on actual benchmark configurations. 
+ + Args: + benchmarks: List of benchmark instances to analyze + + Returns: + Dictionary mapping group names to their metadata + """ + metadata = {} + # Discover all group names from actual benchmarks + for benchmark in benchmarks: + if hasattr(benchmark, "explicit_group") and callable( + benchmark.explicit_group + ): + group_name = benchmark.explicit_group() + if group_name: + self._generate_metadata(metadata, group_name) + + return metadata + + def _generate_metadata( + self, metadata: Dict[str, BenchmarkMetadata], group_name: str + ): + base_metadata = self._base_group_metadata.get(group_name.split()[0], {}) + metadata[group_name] = BenchmarkMetadata( + type="group", + description=base_metadata.get("description"), + notes=base_metadata.get("notes"), + unstable=base_metadata.get("unstable"), + tags=base_metadata.get("tags", []), + range_min=base_metadata.get("range_min"), + range_max=base_metadata.get("range_max"), + explicit_group=group_name, + ) diff --git a/devops/scripts/benchmarks/utils/result.py b/devops/scripts/benchmarks/utils/result.py index d17db392e9334..36f2f67e8b44d 100644 --- a/devops/scripts/benchmarks/utils/result.py +++ b/devops/scripts/benchmarks/utils/result.py @@ -75,8 +75,8 @@ class BenchmarkMetadata: notes: str = None unstable: str = None tags: list[str] = field(default_factory=list) - range_min: float = None - range_max: float = None + range_min: float | None = None + range_max: float | None = None display_name: str = None explicit_group: str = None From ff00adc6cac6c18322288e29d2d38dd18b80454e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Kami=C5=84ski?= Date: Tue, 7 Oct 2025 12:13:15 +0000 Subject: [PATCH 2/5] Apply lslusarczyk's comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Łukasz Ślusarczyk --- .../benchmarks/benches/compute_metadata.py | 109 +++++++++++++----- 1 file changed, 80 insertions(+), 29 deletions(-) diff --git a/devops/scripts/benchmarks/benches/compute_metadata.py b/devops/scripts/benchmarks/benches/compute_metadata.py index 445002c40c7c8..d6996f90234e4 100644 --- a/devops/scripts/benchmarks/benches/compute_metadata.py +++ b/devops/scripts/benchmarks/benches/compute_metadata.py @@ -10,6 +10,7 @@ ensuring consistency between benchmark group membership and group metadata definitions. """ +from collections import namedtuple from typing import Dict, List from utils.result import BenchmarkMetadata @@ -17,6 +18,42 @@ from .base import Benchmark +def string_consts(cls): + """Decorator to convert string-annotated class attributes to string constants.""" + for key, value in cls.__annotations__.items(): + if value is str: + setattr(cls, key, key) + return cls + + +@string_consts +class Tags: + """String constants for benchmark tags to prevent typos.""" + + submit: str + micro: str + SYCL: str + UR: str + L0: str + graph: str + memory: str + proxy: str + finalize: str + + +BaseGroupMetadata = namedtuple( + "BaseGroupMetadata", + [ + "description", + "notes", + "unstable", + "tags", + "range_min", + "range_max", + ], + defaults=(None, None, None, [], None, None), +) + class ComputeMetadataGenerator: """ Generates metadata for Compute Benchmark groups. 
@@ -28,23 +65,35 @@ class ComputeMetadataGenerator: def __init__(self): # Base metadata for core groups self._base_group_metadata = { - "SubmitKernel": { - "description": "Measures CPU time overhead of submitting kernels through different APIs.", - "notes": ( + "SubmitKernel": BaseGroupMetadata( + description="Measures CPU time overhead of submitting kernels through different APIs.", + notes=( "Each layer builds on top of the previous layer, adding functionality and overhead.\n" "The first layer is the Level Zero API, the second is the Unified Runtime API, and the third is the SYCL API.\n" "The UR v2 adapter noticeably reduces UR layer overhead, also improving SYCL performance.\n" "Work is ongoing to reduce the overhead of the SYCL API\n" ), - "tags": ["submit", "micro", "SYCL", "UR", "L0"], - "range_min": 0.0, - }, - "SinKernelGraph": { - "unstable": "This benchmark combines both eager and graph execution, and may not be representative of real use cases.", - "tags": ["submit", "memory", "proxy", "SYCL", "UR", "L0", "graph"], - }, - "SubmitGraph": {"tags": ["submit", "micro", "SYCL", "UR", "L0", "graph"]}, - "FinalizeGraph": {"tags": ["finalize", "micro", "SYCL", "graph"]}, + tags=[Tags.submit, Tags.micro, Tags.SYCL, Tags.UR, Tags.L0], + range_min=0.0, + ), + "SinKernelGraph": BaseGroupMetadata( + unstable="This benchmark combines both eager and graph execution, and may not be representative of real use cases.", + tags=[ + Tags.submit, + Tags.memory, + Tags.proxy, + Tags.SYCL, + Tags.UR, + Tags.L0, + Tags.graph, + ], + ), + "SubmitGraph": BaseGroupMetadata( + tags=[Tags.submit, Tags.micro, Tags.SYCL, Tags.UR, Tags.L0, Tags.graph] + ), + "FinalizeGraph": BaseGroupMetadata( + tags=[Tags.finalize, Tags.micro, Tags.SYCL, Tags.graph] + ), } def generate_metadata_from_benchmarks( @@ -62,26 +111,28 @@ def generate_metadata_from_benchmarks( metadata = {} # Discover all group names from actual benchmarks for benchmark in benchmarks: - if hasattr(benchmark, "explicit_group") and callable( - benchmark.explicit_group - ): - group_name = benchmark.explicit_group() - if group_name: - self._generate_metadata(metadata, group_name) + group_name = benchmark.explicit_group() + if group_name and group_name not in metadata: + metadata[group_name] = self._generate_metadata(group_name) return metadata - def _generate_metadata( - self, metadata: Dict[str, BenchmarkMetadata], group_name: str - ): - base_metadata = self._base_group_metadata.get(group_name.split()[0], {}) - metadata[group_name] = BenchmarkMetadata( + def _generate_metadata(self, group_name: str) -> BenchmarkMetadata: + """ + Generate metadata for a specific benchmark group. 
+ Args: + group_name: Name of the benchmark group + """ + base_metadata = self._base_group_metadata.get( + group_name.split()[0], BaseGroupMetadata() + ) + return BenchmarkMetadata( type="group", - description=base_metadata.get("description"), - notes=base_metadata.get("notes"), - unstable=base_metadata.get("unstable"), - tags=base_metadata.get("tags", []), - range_min=base_metadata.get("range_min"), - range_max=base_metadata.get("range_max"), + description=base_metadata.description, + notes=base_metadata.notes, + unstable=base_metadata.unstable, + tags=base_metadata.tags, + range_min=base_metadata.range_min, + range_max=base_metadata.range_max, explicit_group=group_name, ) From 988cb088187b5971aa4f9399cf60b1a871a8c0c3 Mon Sep 17 00:00:00 2001 From: Patryk Kaminski Date: Tue, 7 Oct 2025 13:23:59 +0200 Subject: [PATCH 3/5] Update missing returned object info in docstring Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- devops/scripts/benchmarks/benches/compute_metadata.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/devops/scripts/benchmarks/benches/compute_metadata.py b/devops/scripts/benchmarks/benches/compute_metadata.py index d6996f90234e4..938795bcf0059 100644 --- a/devops/scripts/benchmarks/benches/compute_metadata.py +++ b/devops/scripts/benchmarks/benches/compute_metadata.py @@ -120,8 +120,12 @@ def generate_metadata_from_benchmarks( def _generate_metadata(self, group_name: str) -> BenchmarkMetadata: """ Generate metadata for a specific benchmark group. + Args: group_name: Name of the benchmark group + + Returns: + BenchmarkMetadata: Metadata object describing the specified benchmark group. """ base_metadata = self._base_group_metadata.get( group_name.split()[0], BaseGroupMetadata() From e315350ade615587016b9af3294f114839776d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Kami=C5=84ski?= Date: Tue, 7 Oct 2025 12:29:46 +0000 Subject: [PATCH 4/5] Be explicit about explicit_group format Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- devops/scripts/benchmarks/benches/compute_metadata.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/devops/scripts/benchmarks/benches/compute_metadata.py b/devops/scripts/benchmarks/benches/compute_metadata.py index 938795bcf0059..7c81cdc4cd954 100644 --- a/devops/scripts/benchmarks/benches/compute_metadata.py +++ b/devops/scripts/benchmarks/benches/compute_metadata.py @@ -127,8 +127,9 @@ def _generate_metadata(self, group_name: str) -> BenchmarkMetadata: Returns: BenchmarkMetadata: Metadata object describing the specified benchmark group. """ + base_group_name = self._extract_base_group_name(group_name) base_metadata = self._base_group_metadata.get( - group_name.split()[0], BaseGroupMetadata() + base_group_name, BaseGroupMetadata() ) return BenchmarkMetadata( type="group", @@ -140,3 +141,11 @@ def _generate_metadata(self, group_name: str) -> BenchmarkMetadata: range_max=base_metadata.range_max, explicit_group=group_name, ) + + def _extract_base_group_name(self, group_name: str) -> str: + """ + Extracts the base group name from a group name string. + Assumes group names are in the format 'BaseGroupName [Variant]'. + If the format changes, this method should be updated accordingly. 
+ """ + return group_name.split()[0] From a918231a4a67e099a23ff73031bb7f1507d7c655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Kami=C5=84ski?= Date: Tue, 7 Oct 2025 12:30:24 +0000 Subject: [PATCH 5/5] Apply linter --- devops/scripts/benchmarks/benches/compute_metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/devops/scripts/benchmarks/benches/compute_metadata.py b/devops/scripts/benchmarks/benches/compute_metadata.py index 7c81cdc4cd954..bce663ee88e9a 100644 --- a/devops/scripts/benchmarks/benches/compute_metadata.py +++ b/devops/scripts/benchmarks/benches/compute_metadata.py @@ -54,6 +54,7 @@ class Tags: defaults=(None, None, None, [], None, None), ) + class ComputeMetadataGenerator: """ Generates metadata for Compute Benchmark groups.
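
For reference, a minimal sketch of how the generator introduced in this series is expected to be driven, mirroring ComputeBench.additional_metadata() from PATCH 1. The FakeBenchmark stand-in, the group-name strings, and the import path are assumptions for illustration only; ComputeMetadataGenerator and its behaviour are taken from the patches above.

    # Hypothetical usage sketch -- not part of the patch series.
    from benches.compute_metadata import ComputeMetadataGenerator


    class FakeBenchmark:
        """Stand-in exposing the explicit_group() hook the generator calls."""

        def __init__(self, group: str):
            self._group = group

        def explicit_group(self) -> str:
            return self._group


    benchmarks = [
        FakeBenchmark("SubmitKernel in order long kernel"),
        FakeBenchmark("SubmitGraph out of order, 10 kernels"),
        FakeBenchmark("FinalizeGraph"),
    ]

    generator = ComputeMetadataGenerator()
    metadata = generator.generate_metadata_from_benchmarks(benchmarks)

    for group, meta in metadata.items():
        # Each entry is a BenchmarkMetadata with type="group"; tags, notes and
        # ranges are resolved from the first word of the group name
        # (e.g. "SubmitKernel"), and explicit_group is set to the full name.
        print(group, meta.tags)

Duplicate group names are collapsed by generate_metadata_from_benchmarks(), so passing the full benchmark list yields exactly one metadata entry per group, which is what additional_metadata() in compute.py relies on.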