diff --git a/.github/workflows/sycl-detect-changes.yml b/.github/workflows/sycl-detect-changes.yml
index 281e60db073b7..964a14b4035d8 100644
--- a/.github/workflows/sycl-detect-changes.yml
+++ b/.github/workflows/sycl-detect-changes.yml
@@ -67,6 +67,8 @@ jobs:
           - devops/dependencies-igc-dev.json
         benchmarks:
           - 'devops/scripts/benchmarks/**'
+          - 'devops/actions/run-tests/benchmark/**'
+          - '.github/workflows/sycl-ur-perf-benchmarking.yml'
         perf-tests:
           - sycl/test-e2e/PerformanceTests/**
         esimd:
diff --git a/.github/workflows/sycl-linux-precommit.yml b/.github/workflows/sycl-linux-precommit.yml
index bba5f526e5808..eae8323f95449 100644
--- a/.github/workflows/sycl-linux-precommit.yml
+++ b/.github/workflows/sycl-linux-precommit.yml
@@ -9,6 +9,7 @@ on:
       - sycl
       - sycl-rel-**
     # Do not run builds if changes are only in the following locations
+    # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (so such changes run there instead)
    paths-ignore:
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/CODEOWNERS'
@@ -32,6 +33,9 @@ on:
      - 'unified-runtime/test/**'
      - 'unified-runtime/third_party/**'
      - 'unified-runtime/tools/**'
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
 
 concurrency:
   # Cancel a currently running workflow from the same PR, branch or tag.
@@ -225,29 +229,6 @@ jobs:
       skip_run: ${{matrix.use_igc_dev && contains(github.event.pull_request.labels.*.name, 'ci-no-devigc') || 'false'}}
       env: ${{ matrix.env || (contains(needs.detect_changes.outputs.filters, 'esimd') && '{}' || '{"LIT_FILTER_OUT":"ESIMD/"}') }}
 
-  test_benchmark_scripts:
-    needs: [build, detect_changes]
-    if: |
-      always() && !cancelled()
-      && needs.build.outputs.build_conclusion == 'success'
-      && contains(needs.detect_changes.outputs.filters, 'benchmarks')
-    uses: ./.github/workflows/sycl-linux-run-tests.yml
-    with:
-      name: Benchmark suite precommit testing
-      runner: '["PVC_PERF"]'
-      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
-      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
-      target_devices: 'level_zero:gpu'
-      tests_selector: benchmarks
-      benchmark_upload_results: false
-      benchmark_preset: 'Minimal'
-      benchmark_dry_run: true
-      benchmark_exit_on_failure: true
-      repo_ref: ${{ github.sha }}
-      toolchain_artifact: ${{ needs.build.outputs.toolchain_artifact }}
-      toolchain_artifact_filename: ${{ needs.build.outputs.toolchain_artifact_filename }}
-      toolchain_decompress_command: ${{ needs.build.outputs.toolchain_decompress_command }}
-
   test-perf:
     needs: [build, detect_changes]
     if: |
diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml
index f247256da4ab3..cc8733aaa830e 100644
--- a/.github/workflows/sycl-linux-run-tests.yml
+++ b/.github/workflows/sycl-linux-run-tests.yml
@@ -134,6 +134,7 @@ on:
         type: string
         default: 'Minimal'
         required: False
+      # dry-run is passed only to compare.py (so it does not fail on regressions), not to main.py (where such a flag would skip all benchmark runs)
      benchmark_dry_run:
        description: |
          Whether or not to fail the workflow upon a regression.
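How the path filters above fit together: the precommit workflows list the benchmark locations under `paths-ignore`, while sycl-ur-perf-benchmarking.yml lists the exact same globs under `paths`. A benchmark-only PR therefore skips precommit and triggers the benchmark workflow instead; a PR that touches benchmark files alongside anything else still runs both workflows. A minimal sketch of the two sides (unrelated paths omitted):

    # sycl-linux-precommit.yml / sycl-windows-precommit.yml (sketch)
    on:
      pull_request:
        paths-ignore:
          - 'devops/scripts/benchmarks/**'
          - 'devops/actions/run-tests/benchmark/**'
          - '.github/workflows/sycl-ur-perf-benchmarking.yml'

    # sycl-ur-perf-benchmarking.yml (sketch)
    on:
      pull_request:
        paths:
          - 'devops/scripts/benchmarks/**'
          - 'devops/actions/run-tests/benchmark/**'
          - '.github/workflows/sycl-ur-perf-benchmarking.yml'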
diff --git a/.github/workflows/sycl-nightly-benchmarking.yml b/.github/workflows/sycl-nightly-benchmarking.yml
deleted file mode 100644
index 9d1098ee68b33..0000000000000
--- a/.github/workflows/sycl-nightly-benchmarking.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: SYCL Nightly Benchmarking
-
-on:
-  workflow_dispatch:
-  schedule:
-    - cron: '0 0 * * *'
-    # 3 hours ahead of SYCL nightly
-
-permissions: read-all
-
-jobs:
-  ubuntu2204_build:
-    if: github.repository == 'intel/llvm'
-    uses: ./.github/workflows/sycl-linux-build.yml
-    secrets: inherit
-    with:
-      build_cache_root: "/__w/"
-      build_configure_extra_args: '--no-assertions'
-      build_image: ghcr.io/intel/llvm/ubuntu2404_build:latest
-
-      toolchain_artifact: sycl_linux_default
-      toolchain_artifact_filename: sycl_linux.tar.gz
-
-  run-sycl-benchmarks:
-    needs: [ubuntu2204_build]
-    if: ${{ always() && !cancelled() && needs.ubuntu2204_build.outputs.build_conclusion == 'success' }}
-    strategy:
-      fail-fast: false
-      matrix:
-        runner: ['["PVC_PERF"]', '["BMG_PERF"]']
-        backend: ['level_zero:gpu', 'level_zero_v2:gpu']
-        include:
-          - ref: ${{ github.sha }}
-            save_name: 'Baseline'
-            preset: 'Full'
-    uses: ./.github/workflows/sycl-linux-run-tests.yml
-    secrets: inherit
-    with:
-      name: Run compute-benchmarks (${{ matrix.runner }}, ${{ matrix.backend }})
-      runner: ${{ matrix.runner }}
-      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
-      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
-      target_devices: ${{ matrix.backend }}
-      tests_selector: benchmarks
-      benchmark_upload_results: true
-      benchmark_save_name: ${{ matrix.save_name }}
-      benchmark_preset: ${{ matrix.preset }}
-      repo_ref: ${{ matrix.ref }}
-      toolchain_artifact: ${{ needs.ubuntu2204_build.outputs.toolchain_artifact }}
-      toolchain_artifact_filename: ${{ needs.ubuntu2204_build.outputs.toolchain_artifact_filename }}
-      toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.toolchain_decompress_command }}
-
diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml
index 0fd16fd77dd4f..7d5b0cb744867 100644
--- a/.github/workflows/sycl-ur-perf-benchmarking.yml
+++ b/.github/workflows/sycl-ur-perf-benchmarking.yml
@@ -1,59 +1,19 @@
-name: Run Benchmarks
+# A combined workflow for all benchmark-related jobs for SYCL and UR.
+# Supports both manual triggering (dispatch) and nightly runs.
+# It also tests changes to the benchmark scripts/framework when they are modified in a PR.
+name: SYCL Run Benchmarks
 
 on:
-  workflow_call:
-    inputs:
-      preset:
-        type: string
-        description: |
-          Benchmark presets to run: See /devops/scripts/benchmarks/presets.py
-        required: false
-        default: 'Minimal' # Only compute-benchmarks
-      pr_no:
-        type: string
-        description: |
-          PR no. to build SYCL from if specified: SYCL will be built from HEAD
-          of incoming branch used by the specified PR no.
-
-          If both pr_no and commit_hash are empty, the latest commit in
-          deployment branch will be used.
-        required: false
-        default: ''
-      commit_hash:
-        type: string
-        description: |
-          Commit hash (within intel/llvm) to build SYCL from if specified.
-
-          If both pr_no and commit_hash are empty, the latest commit in
-          deployment branch will be used.
-        required: false
-        default: ''
-      save_name:
-        type: string
-        description: |
-          Specify a custom name to use for the benchmark result: If uploading
-          results, this will be the name used to refer results from the current
-          run.
-        required: false
-        default: ''
-      upload_results:
-        type: string # true/false: workflow_dispatch does not support booleans
-        description: |
-          Upload results to https://intel.github.io/llvm/benchmarks/.
-        required: true
-      exit_on_failure:
-        type: string # true/false: workflow_dispatch does not support booleans
-        description: |
-          Fail benchmark script on any error. Limit number of iterations to just test correctness.
-        required: false
-        default: 'false'
-      runner:
-        type: string
-        required: true
-      backend:
-        type: string
-        required: true
-
+  schedule:
+    # 3 hours ahead of SYCL nightly
+    - cron: '0 0 * * *'
+  # Run on pull requests only when benchmark-related files are changed.
+  pull_request:
+    # These paths are exactly the same as in sycl-linux/windows-precommit.yml (where they are ignored, so such changes run here instead)
+    paths:
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
   workflow_dispatch:
     inputs:
       preset:
@@ -66,6 +26,8 @@ on:
           - Minimal
           - Normal
           - Test
+          - Gromacs
+          - OneDNN
         default: 'Minimal' # Only compute-benchmarks
       pr_no:
         type: string
@@ -115,13 +77,14 @@ on:
         options:
           - 'level_zero:gpu'
          - 'level_zero_v2:gpu'
-          # As of #17407, sycl-linux-build now builds v2 by default
 
 permissions: read-all
 
 jobs:
-  sanitize_inputs:
-    name: Sanitize inputs
+  # Manual trigger (dispatch) path:
+  sanitize_inputs_dispatch:
+    name: '[Dispatch] Sanitize inputs'
+    if: github.event_name == 'workflow_dispatch'
     runs-on: ubuntu-latest
     env:
       COMMIT_HASH: ${{ inputs.commit_hash }}
@@ -169,12 +132,12 @@ jobs:
         echo "Final sanitized values:"
         cat $GITHUB_OUTPUT
 
-  build_sycl:
-    name: Build SYCL
-    needs: [ sanitize_inputs ]
+  build_sycl_dispatch:
+    name: '[Dispatch] Build SYCL'
+    needs: [ sanitize_inputs_dispatch ]
     uses: ./.github/workflows/sycl-linux-build.yml
     with:
-      build_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
+      build_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }}
       build_cache_root: "/__w/"
       build_cache_suffix: "prod_noassert"
       build_configure_extra_args: "--no-assertions"
@@ -182,12 +145,12 @@ jobs:
       cc: clang
       cxx: clang++
       changes: '[]'
-
       toolchain_artifact: sycl_linux_prod_noassert
 
-  run_benchmarks_build:
-    name: Run Benchmarks on Build
-    needs: [ build_sycl, sanitize_inputs ]
+  benchmark_dispatch:
+    name: '[Dispatch] Benchmarks'
+    needs: [ build_sycl_dispatch, sanitize_inputs_dispatch ]
+    if: always() && !cancelled() && needs.build_sycl_dispatch.outputs.build_conclusion == 'success'
     strategy:
       matrix:
         include:
@@ -197,17 +160,102 @@ jobs:
     uses: ./.github/workflows/sycl-linux-run-tests.yml
     secrets: inherit
     with:
-      name: Run compute-benchmarks (${{ matrix.save_name }}, ${{ matrix.runner }}, ${{ matrix.backend }})
+      name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})"
       runner: ${{ matrix.runner }}
       image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
       image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
       target_devices: ${{ matrix.backend }}
       tests_selector: benchmarks
       benchmark_upload_results: ${{ inputs.upload_results }}
-      benchmark_save_name: ${{ needs.sanitize_inputs.outputs.benchmark_save_name }}
+      benchmark_save_name: ${{ needs.sanitize_inputs_dispatch.outputs.benchmark_save_name }}
       benchmark_preset: ${{ inputs.preset }}
       benchmark_exit_on_failure: ${{ inputs.exit_on_failure }}
-      repo_ref: ${{ needs.sanitize_inputs.outputs.build_ref }}
-      toolchain_artifact: ${{ needs.build_sycl.outputs.toolchain_artifact }}
-      toolchain_artifact_filename: ${{ needs.build_sycl.outputs.toolchain_artifact_filename }}
-      toolchain_decompress_command: ${{ needs.build_sycl.outputs.toolchain_decompress_command }}
+      repo_ref: ${{ needs.sanitize_inputs_dispatch.outputs.build_ref }}
+      toolchain_artifact: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build_sycl_dispatch.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build_sycl_dispatch.outputs.toolchain_decompress_command }}
+  # END manual trigger (dispatch) path
+
+  # Nightly benchmarking path:
+  build_nightly:
+    name: '[Nightly] Build SYCL'
+    if: github.repository == 'intel/llvm' && github.event_name == 'schedule'
+    uses: ./.github/workflows/sycl-linux-build.yml
+    secrets: inherit
+    with:
+      build_cache_root: "/__w/"
+      build_configure_extra_args: '--no-assertions'
+      build_image: ghcr.io/intel/llvm/ubuntu2404_build:latest
+
+      toolchain_artifact: sycl_linux_default
+      toolchain_artifact_filename: sycl_linux.tar.gz
+
+  benchmark_nightly:
+    name: '[Nightly] Benchmarks'
+    needs: [build_nightly]
+    if: always() && !cancelled() && needs.build_nightly.outputs.build_conclusion == 'success'
+    strategy:
+      fail-fast: false
+      matrix:
+        runner: ['["PVC_PERF"]', '["BMG_PERF"]']
+        backend: ['level_zero:gpu', 'level_zero_v2:gpu']
+        include:
+          - ref: ${{ github.sha }}
+            save_name: 'Baseline'
+            preset: 'Full'
+    uses: ./.github/workflows/sycl-linux-run-tests.yml
+    secrets: inherit
+    with:
+      name: "Benchmarks (${{ matrix.runner }}, ${{ matrix.backend }}, preset: ${{ matrix.preset }})"
+      runner: ${{ matrix.runner }}
+      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
+      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
+      target_devices: ${{ matrix.backend }}
+      tests_selector: benchmarks
+      benchmark_upload_results: true
+      benchmark_save_name: ${{ matrix.save_name }}
+      benchmark_preset: ${{ matrix.preset }}
+      repo_ref: ${{ matrix.ref }}
+      toolchain_artifact: ${{ needs.build_nightly.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build_nightly.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build_nightly.outputs.toolchain_decompress_command }}
+  # END nightly benchmarking path
+
+  # PR path: build and test benchmark framework changes:
+  build_pr:
+    name: '[PR] Build SYCL'
+    if: github.event_name == 'pull_request'
+    uses: ./.github/workflows/sycl-linux-build.yml
+    with:
+      build_ref: ${{ github.sha }}
+      build_cache_root: "/__w/"
+      build_cache_suffix: "default"
+      # The Docker image has the latest nightly build pre-installed and added to the PATH
+      build_image: "ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest"
+      cc: clang
+      cxx: clang++
+      # This workflow has no detect_changes job, so pass an empty filter list
+      changes: '[]'
+      toolchain_artifact: sycl_linux_default
+
+  # TODO: When we have stable BMG runner(s), consider moving this job to that runner.
+  test_benchmark_framework:
+    name: '[PR] Benchmark suite testing'
+    needs: [build_pr]
+    if: always() && !cancelled() && needs.build_pr.outputs.build_conclusion == 'success'
+    uses: ./.github/workflows/sycl-linux-run-tests.yml
+    with:
+      name: 'Framework test: PVC_PERF, L0, Minimal preset'
+      runner: '["PVC_PERF"]'
+      image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest
+      image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN
+      target_devices: 'level_zero:gpu'
+      tests_selector: benchmarks
+      benchmark_upload_results: false
+      benchmark_preset: 'Minimal'
+      benchmark_dry_run: true
+      benchmark_exit_on_failure: true
+      repo_ref: ${{ github.sha }}
+      toolchain_artifact: ${{ needs.build_pr.outputs.toolchain_artifact }}
+      toolchain_artifact_filename: ${{ needs.build_pr.outputs.toolchain_artifact_filename }}
+      toolchain_decompress_command: ${{ needs.build_pr.outputs.toolchain_decompress_command }}
+  # END PR path
diff --git a/.github/workflows/sycl-windows-precommit.yml b/.github/workflows/sycl-windows-precommit.yml
index ca7022c83b006..58254e8495815 100644
--- a/.github/workflows/sycl-windows-precommit.yml
+++ b/.github/workflows/sycl-windows-precommit.yml
@@ -7,6 +7,7 @@ on:
       - llvmspirv_pulldown
       - sycl-rel-**
     # Do not run builds if changes are only in the following locations
+    # Note: benchmark-related paths are the same as in sycl-ur-perf-benchmarking.yml (so such changes run there instead)
    paths-ignore:
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/CODEOWNERS'
@@ -32,6 +33,9 @@ on:
      - 'unified-runtime/test/**'
      - 'unified-runtime/third_party/**'
      - 'unified-runtime/tools/**'
+      - 'devops/scripts/benchmarks/**'
+      - 'devops/actions/run-tests/benchmark/**'
+      - '.github/workflows/sycl-ur-perf-benchmarking.yml'
 
 permissions: read-all
 
diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml
index 13dc4cf7cbf66..073501dcbb227 100644
--- a/devops/actions/run-tests/benchmark/action.yml
+++ b/devops/actions/run-tests/benchmark/action.yml
@@ -29,6 +29,7 @@ inputs:
     type: string
     required: False
     default: ""
+  # dry-run is passed only to compare.py (so it does not fail on regressions), not to main.py (where such a flag would skip all benchmark runs)
  dry_run:
    type: string
    required: False
diff --git a/devops/scripts/benchmarks/CONTRIB.md b/devops/scripts/benchmarks/CONTRIB.md
index 7d1d1150e1353..fb1964dad7fe8 100644
--- a/devops/scripts/benchmarks/CONTRIB.md
+++ b/devops/scripts/benchmarks/CONTRIB.md
@@ -170,7 +170,7 @@ The benchmark suite generates an interactive HTML dashboard that visualizes `Res
    * If adding to an existing category, modify the corresponding `Suite` class (e.g., `benches/compute.py`) to instantiate and return your new benchmark in its `benchmarks()` method.
    * If creating a new category, create a new `Suite` class inheriting from `benches.base.Suite`. Implement `name()` and `benchmarks()`. Add necessary `setup()` if the suite requires shared setup. Add group metadata via `additional_metadata()` if needed.
 3. **Register Suite:** Import and add your new `Suite` instance to the `suites` list in `main.py`.
-4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` to include the new suite in presets' description.
+4. **Add to Presets:** If adding a new suite, add its `name()` to the relevant lists in `presets.py` (e.g., "Full", "Normal") so it runs with those presets. Update `README.md` and the benchmarking workflow to include the new suite in the presets' descriptions and dispatch choices.
 
 ## Recommendations
diff --git a/devops/scripts/benchmarks/README.md b/devops/scripts/benchmarks/README.md
index e7a2a1e743bb2..5429f75788015 100644
--- a/devops/scripts/benchmarks/README.md
+++ b/devops/scripts/benchmarks/README.md
@@ -115,7 +115,7 @@ The benchmarks scripts are used in a GitHub Actions workflow, and can be automat
 
 ![compute benchmarks](workflow.png "Compute Benchmarks CI job")
 
-To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. Important field is the `PR number`, which is the identifier for the Pull Request against which you want the benchmarks to run. Instead, you can specify `Commit hash` from within intel/llvm repository, or leave both empty to run benchmarks against the branch/tag the workflow started from (the value from dropdown list at the top).
+To execute the benchmarks in CI, navigate to the `Actions` tab and then go to the `SYCL Run Benchmarks` workflow. Here, you will find a list of previous runs and a "Run workflow" button. Upon clicking the button, you will be prompted to fill in a form to customize your benchmark run. An important field is the `PR number`, which identifies the Pull Request against which you want the benchmarks to run. Alternatively, you can specify a `Commit hash` from within the intel/llvm repository, or leave both empty to run the benchmarks against the branch/tag the workflow started from (the value in the dropdown list at the top).
 
 Once all the information is entered, click the "Run workflow" button to initiate a new workflow run. This will execute the benchmarks and then post the results as a comment on the specified Pull Request.
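The same dispatch can also be done without the web UI. A sketch using the GitHub CLI, assuming `gh` is authenticated against intel/llvm; the input names come from the workflow_dispatch inputs above, while the PR number 12345 and the chosen values are placeholders:

    # Dispatch the benchmark workflow against a hypothetical PR #12345
    gh workflow run sycl-ur-perf-benchmarking.yml \
      --repo intel/llvm \
      --ref sycl \
      -f preset='Minimal' \
      -f pr_no='12345' \
      -f upload_results='false' \
      -f runner='["PVC_PERF"]' \
      -f backend='level_zero:gpu'

Note that `upload_results`, `runner`, and `backend` are required inputs; `preset`, `pr_no`, and the others fall back to their defaults when omitted.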