From 0f5b10ad332634142a4a24bf2d4fb89dcfb289e5 Mon Sep 17 00:00:00 2001 From: "Kornev, Nikita" Date: Fri, 13 Jun 2025 16:32:27 +0200 Subject: [PATCH 1/3] Copy-paste some workflows and devops from sycl --- .github/workflows/sycl-hardening-check.yml | 90 ++ .github/workflows/sycl-linux-build.yml | 54 +- .github/workflows/sycl-linux-run-tests.yml | 73 +- .github/workflows/sycl-rel-nightly.yml | 88 +- .github/workflows/sycl-windows-build.yml | 72 +- .github/workflows/sycl-windows-run-tests.yml | 69 +- .../actions/benchmarking/aggregate/action.yml | 95 -- devops/actions/cached_checkout/action.yml | 2 +- devops/actions/reset_gpu/action.yml | 32 + devops/actions/run-tests/benchmark/action.yml | 217 +++- devops/actions/run-tests/cts/action.yml | 14 +- devops/actions/run-tests/e2e/action.yml | 14 +- .../actions/run-tests/windows/cts/action.yml | 14 +- .../actions/run-tests/windows/e2e/action.yml | 112 ++ devops/benchmarking/config.ini | 44 - devops/benchmarking/constants.ini | 48 - devops/benchmarking/enabled_tests.conf | 8 - devops/containers/ubuntu2204_build.Dockerfile | 2 +- .../ubuntu2204_intel_drivers.Dockerfile | 9 +- devops/containers/ubuntu2404_base.Dockerfile | 5 + .../ubuntu2404_intel_drivers.Dockerfile | 9 +- devops/dependencies-igc-dev.json | 8 +- devops/dependencies.json | 66 +- devops/scripts/benchmarking/aggregate.py | 205 --- devops/scripts/benchmarking/benchmark.sh | 300 ----- devops/scripts/benchmarking/common.py | 196 --- devops/scripts/benchmarking/compare.py | 101 -- devops/scripts/benchmarking/load_config.py | 30 - devops/scripts/benchmarks/.gitignore | 1 + devops/scripts/benchmarks/CONTRIB.md | 98 ++ devops/scripts/benchmarks/README.md | 74 ++ devops/scripts/benchmarks/benches/base.py | 195 +++ devops/scripts/benchmarks/benches/compute.py | 1007 +++++++++++++++ devops/scripts/benchmarks/benches/gromacs.py | 274 ++++ devops/scripts/benchmarks/benches/llamacpp.py | 184 +++ .../scripts/benchmarks/benches/syclbench.py | 543 ++++++++ devops/scripts/benchmarks/benches/test.py | 105 ++ devops/scripts/benchmarks/benches/umf.py | 249 ++++ devops/scripts/benchmarks/benches/velocity.py | 615 +++++++++ devops/scripts/benchmarks/compare.py | 379 ++++++ devops/scripts/benchmarks/history.py | 225 ++++ devops/scripts/benchmarks/html/config.js | 5 + devops/scripts/benchmarks/html/index.html | 86 ++ devops/scripts/benchmarks/html/scripts.js | 1111 +++++++++++++++++ devops/scripts/benchmarks/html/styles.css | 357 ++++++ devops/scripts/benchmarks/main.py | 643 ++++++++++ devops/scripts/benchmarks/options.py | 97 ++ devops/scripts/benchmarks/output_html.py | 59 + devops/scripts/benchmarks/output_markdown.py | 415 ++++++ devops/scripts/benchmarks/presets.py | 81 ++ devops/scripts/benchmarks/requirements.txt | 5 + devops/scripts/benchmarks/utils/aggregate.py | 53 + .../benchmarks/utils/compute_runtime.py | 229 ++++ .../benchmarks/utils/detect_versions.cpp | 82 ++ .../benchmarks/utils/detect_versions.py | 250 ++++ devops/scripts/benchmarks/utils/oneapi.py | 115 ++ devops/scripts/benchmarks/utils/result.py | 74 ++ devops/scripts/benchmarks/utils/utils.py | 176 +++ devops/scripts/benchmarks/utils/validate.py | 72 ++ devops/scripts/benchmarks/workflow.png | Bin 0 -> 217603 bytes devops/scripts/install_build_tools.sh | 1 + devops/scripts/install_drivers.sh | 22 +- devops/scripts/update_drivers.py | 39 +- 63 files changed, 8595 insertions(+), 1303 deletions(-) create mode 100644 .github/workflows/sycl-hardening-check.yml delete mode 100644 devops/actions/benchmarking/aggregate/action.yml create mode 
100644 devops/actions/reset_gpu/action.yml create mode 100644 devops/actions/run-tests/windows/e2e/action.yml delete mode 100644 devops/benchmarking/config.ini delete mode 100644 devops/benchmarking/constants.ini delete mode 100644 devops/benchmarking/enabled_tests.conf delete mode 100644 devops/scripts/benchmarking/aggregate.py delete mode 100755 devops/scripts/benchmarking/benchmark.sh delete mode 100644 devops/scripts/benchmarking/common.py delete mode 100644 devops/scripts/benchmarking/compare.py delete mode 100644 devops/scripts/benchmarking/load_config.py create mode 100644 devops/scripts/benchmarks/.gitignore create mode 100644 devops/scripts/benchmarks/CONTRIB.md create mode 100644 devops/scripts/benchmarks/README.md create mode 100644 devops/scripts/benchmarks/benches/base.py create mode 100644 devops/scripts/benchmarks/benches/compute.py create mode 100644 devops/scripts/benchmarks/benches/gromacs.py create mode 100644 devops/scripts/benchmarks/benches/llamacpp.py create mode 100644 devops/scripts/benchmarks/benches/syclbench.py create mode 100644 devops/scripts/benchmarks/benches/test.py create mode 100644 devops/scripts/benchmarks/benches/umf.py create mode 100644 devops/scripts/benchmarks/benches/velocity.py create mode 100644 devops/scripts/benchmarks/compare.py create mode 100644 devops/scripts/benchmarks/history.py create mode 100644 devops/scripts/benchmarks/html/config.js create mode 100644 devops/scripts/benchmarks/html/index.html create mode 100644 devops/scripts/benchmarks/html/scripts.js create mode 100644 devops/scripts/benchmarks/html/styles.css create mode 100755 devops/scripts/benchmarks/main.py create mode 100644 devops/scripts/benchmarks/options.py create mode 100644 devops/scripts/benchmarks/output_html.py create mode 100644 devops/scripts/benchmarks/output_markdown.py create mode 100644 devops/scripts/benchmarks/presets.py create mode 100644 devops/scripts/benchmarks/requirements.txt create mode 100644 devops/scripts/benchmarks/utils/aggregate.py create mode 100644 devops/scripts/benchmarks/utils/compute_runtime.py create mode 100644 devops/scripts/benchmarks/utils/detect_versions.cpp create mode 100644 devops/scripts/benchmarks/utils/detect_versions.py create mode 100644 devops/scripts/benchmarks/utils/oneapi.py create mode 100644 devops/scripts/benchmarks/utils/result.py create mode 100644 devops/scripts/benchmarks/utils/utils.py create mode 100644 devops/scripts/benchmarks/utils/validate.py create mode 100644 devops/scripts/benchmarks/workflow.png diff --git a/.github/workflows/sycl-hardening-check.yml b/.github/workflows/sycl-hardening-check.yml new file mode 100644 index 0000000000000..3d243c638ce35 --- /dev/null +++ b/.github/workflows/sycl-hardening-check.yml @@ -0,0 +1,90 @@ +name: SYCL hardening check + +permissions: read-all + +on: + workflow_call: + inputs: + sycl_linux_artifact: + type: string + sycl_linux_archive: + type: string + sycl_linux_decompress_command: + type: string + + sycl_windows_artifact: + type: string + sycl_windows_archive: + type: string + +jobs: + hardening_check: + runs-on: ubuntu-latest + + steps: + - name: Install hardening-check + run: | + sudo apt update + sudo apt install -y devscripts + + - name: Download SYCL toolchain + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.sycl_linux_artifact }} + + - name: Extract SYCL toolchain + run: | + mkdir toolchain + tar -I '${{ inputs.sycl_linux_decompress_command }}' -xf ${{ inputs.sycl_linux_archive }} -C toolchain + + - name: Perform checks + run: | + for file in 
./toolchain/bin/*; do + hardening-check "$file" | tee -a "./hardening-check.txt" + done + + for file in $(find ./toolchain/lib/ -type f -name "*.so*"); do + hardening-check "$file" | tee -a "./hardening-check.txt" + done + + - uses: actions/upload-artifact@v4 + with: + name: hardening-check + path: hardening-check.txt + + winchecksec: + runs-on: windows-latest + + steps: + - name: Install winchecksec + run: | + curl -LO https://github.com/trailofbits/winchecksec/releases/download/v3.1.0/windows.x64.Release.zip + mkdir winchecksec + unzip "windows.x64.Release.zip" -d winchecksec + + - name: Download SYCL toolchain + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.sycl_windows_artifact }} + + - name: Extract SYCL toolchain + shell: bash + run: | + mkdir toolchain + tar -xf ${{ inputs.sycl_windows_archive }} -C toolchain + + - name: Download and check Windows artifacts + shell: bash + run: | + for file in $(find ./toolchain/bin/ -type f -name "*.exe"); do + ./winchecksec/build/Release/winchecksec.exe "$file" | tee -a "./winchecksec.txt" + done + + for file in $(find ./toolchain/bin/ -type f -name "*.dll"); do + ./winchecksec/build/Release/winchecksec.exe "$file" | tee -a "./winchecksec.txt" + done + + - uses: actions/upload-artifact@v4 + with: + name: winchecksec + path: winchecksec.txt diff --git a/.github/workflows/sycl-linux-build.yml b/.github/workflows/sycl-linux-build.yml index b76609f9d66bd..a230fdad40ad4 100644 --- a/.github/workflows/sycl-linux-build.yml +++ b/.github/workflows/sycl-linux-build.yml @@ -32,6 +32,10 @@ on: build_artifact_suffix: type: string required: true + build_target: + type: string + required: false + default: sycl-toolchain artifact_archive_name: type: string default: llvm_sycl.tar.zst @@ -46,7 +50,10 @@ on: default: 3 e2e_binaries_artifact: type: string - required: False + required: false + pack_release: + type: string + required: false outputs: build_conclusion: @@ -100,6 +107,10 @@ on: options: - 3 + pack_release: + type: string + required: false + permissions: read-all jobs: @@ -170,7 +181,8 @@ jobs: -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=SPIRV - name: Compile id: build - run: cmake --build $GITHUB_WORKSPACE/build --target sycl-toolchain + # Emulate default value for manual dispatch as we've run out of available arguments. + run: cmake --build $GITHUB_WORKSPACE/build --target ${{ inputs.build_target || 'sycl-toolchain' }} - name: check-llvm if: always() && !cancelled() && contains(inputs.changes, 'llvm') run: | @@ -215,11 +227,26 @@ jobs: # TODO consider moving this to Dockerfile. export LD_LIBRARY_PATH=/usr/local/cuda/compat/:/usr/local/cuda/lib64:$LD_LIBRARY_PATH LIT_OPTS="--allow-empty-runs" LIT_FILTER="e2e_test_requirements" cmake --build $GITHUB_WORKSPACE/build --target check-sycl - - name: Install + - name: Install sycl-toolchain if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} - # TODO replace utility installation with a single CMake target run: | cmake --build $GITHUB_WORKSPACE/build --target deploy-sycl-toolchain + + - name: Pack toolchain release + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' && inputs.pack_release == 'true' }} + run: tar -I '${{ steps.artifact_info.outputs.COMPRESS }}' -cf ${{ steps.artifact_info.outputs.ARCHIVE_NAME }} -C $GITHUB_WORKSPACE/build/install . 
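Aside on the packing step above: a minimal local sketch of the pack/extract round trip that the release artifact and the later hardening-check job rely on. Here 'zstd' and 'zstd -d' are assumed values standing in for steps.artifact_info.outputs.COMPRESS and the workflow's decompress command (consistent with the llvm_sycl.tar.zst default archive name); paths are illustrative only.

    # Pack the installed toolchain, mirroring "Pack toolchain release".
    # 'zstd' is an assumption for the COMPRESS output of the artifact_info step.
    tar -I 'zstd' -cf llvm_sycl.tar.zst -C ./build/install .
    # Extract it the way sycl-hardening-check.yml does, with the matching
    # decompress command.
    mkdir -p toolchain
    tar -I 'zstd -d' -xf llvm_sycl.tar.zst -C toolchain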
+ - name: Upload toolchain release + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' && inputs.pack_release == 'true' }} + uses: actions/upload-artifact@v4 + with: + name: sycl_linux_release + path: ${{ steps.artifact_info.outputs.ARCHIVE_NAME }} + retention-days: ${{ inputs.retention-days }} + + - name: Install utilities + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} + # TODO replace utility installation with a single CMake target + run: | cmake --build $GITHUB_WORKSPACE/build --target utils/FileCheck/install cmake --build $GITHUB_WORKSPACE/build --target utils/count/install cmake --build $GITHUB_WORKSPACE/build --target utils/not/install @@ -278,7 +305,7 @@ jobs: testing_mode: build-only target_devices: all binaries_artifact: ${{ inputs.e2e_binaries_artifact }} - cxx_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ + sycl_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ extra_lit_opts: --param sycl_build_targets="spir;nvidia;amd" - name: Remove E2E tests before spirv-backend run @@ -293,5 +320,20 @@ jobs: testing_mode: build-only target_devices: all binaries_artifact: ${{ inputs.e2e_binaries_artifact }}_spirv_backend - cxx_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ + sycl_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ extra_lit_opts: --param spirv-backend=True + + - name: Remove E2E tests before preview-mode run + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + run: rm -rf build-e2e + + - name: Build E2E tests in Preview Mode + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + uses: ./devops/actions/run-tests/e2e + with: + ref: ${{ inputs.ref || github.sha }} + testing_mode: build-only + target_devices: all + binaries_artifact: ${{ inputs.e2e_binaries_artifact }}_preview + sycl_compiler: $GITHUB_WORKSPACE/toolchain/bin/clang++ + extra_lit_opts: --param test-preview-mode=True diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml index 2f3c02bf334ed..025e93b9037e7 100644 --- a/.github/workflows/sycl-linux-run-tests.yml +++ b/.github/workflows/sycl-linux-run-tests.yml @@ -25,7 +25,7 @@ on: required: False tests_selector: description: | - Three possible options: "e2e", "cts", and "compute-benchmarks". + Three possible options: "e2e", "cts", and "benchmarks". type: string default: "e2e" @@ -82,9 +82,6 @@ on: type: string default: 1 - reset_intel_gpu: - type: string - required: False install_igc_driver: type: string required: False @@ -114,6 +111,33 @@ on: default: '' required: False + benchmark_upload_results: + description: | + Set to true to upload results to git repository storing benchmarking + results. + type: string + default: 'false' + required: False + benchmark_save_name: + description: | + Save name to use for benchmark results: Save names are stored in + metadata of result file, and are used to identify benchmark results in + the same series (e.g. same configuration, same device, etc.). + + Note: Currently, benchmark result filenames are in the format of + ___YYYYMMDD_HHMMSS.json + type: string + default: '' + required: False + benchmark_preset: + description: | + Name of benchmark preset to run. + + See /devops/scripts/benchmarks/presets.py for all presets available. 
+ type: string + default: 'Minimal' + required: False + workflow_dispatch: inputs: runner: @@ -153,7 +177,7 @@ on: options: - e2e - cts - - compute-benchmarks + - benchmarks env: description: | @@ -171,14 +195,6 @@ on: Extra options to be added to LIT_OPTS. default: '' - reset_intel_gpu: - description: | - Reset Intel GPUs - type: choice - options: - - false - - true - e2e_testing_mode: type: choice options: @@ -199,24 +215,6 @@ jobs: options: ${{ inputs.image_options }} env: ${{ fromJSON(inputs.env) }} steps: - - name: Reset Intel GPU - if: inputs.reset_intel_gpu == 'true' - shell: bash - run: | - if [[ '${{ inputs.runner }}' == '["Linux", "bmg"]' ]]; then - sudo bash -c 'echo 0000:05:00.0 > /sys/bus/pci/drivers/xe/unbind' - sudo bash -c 'echo 1 > /sys/bus/pci/devices/0000:05:00.0/reset' - sudo bash -c 'echo 0000:05:00.0 > /sys/bus/pci/drivers/xe/bind' - else - sudo mount -t debugfs none /sys/kernel/debug - base_dir="/sys/kernel/debug/dri" - - for dir in "$base_dir"/*; do - if [ -f "$dir/i915_wedged" ]; then - sudo bash -c 'echo 1 > $0/i915_wedged' $dir - fi - done - fi - uses: actions/checkout@v4 with: ref: ${{ inputs.devops_ref || inputs.repo_ref }} @@ -224,6 +222,8 @@ jobs: devops - name: Register cleanup after job is finished uses: ./devops/actions/cleanup + - name: Reset Intel GPU + uses: ./devops/actions/reset_gpu - name: Install drivers if: inputs.install_igc_driver == 'true' || inputs.install_dev_igc_driver == 'true' env: @@ -308,6 +308,7 @@ jobs: - name: Run E2E Tests if: inputs.tests_selector == 'e2e' uses: ./devops/actions/run-tests/e2e + timeout-minutes: 60 with: ref: ${{ inputs.tests_ref || inputs.repo_ref || github.sha }} binaries_artifact: ${{ inputs.e2e_binaries_artifact }} @@ -320,6 +321,9 @@ jobs: - name: Run SYCL CTS Tests if: inputs.tests_selector == 'cts' uses: ./devops/actions/run-tests/cts + # Normally this job takes less than 10m. But sometimes it hangs up and + # reaches the 360m limit. Set a lower limit to free up the runner earlier. + timeout-minutes: 35 with: ref: ${{ inputs.tests_ref || 'main' }} cts_exclude_ref: ${{ inputs.repo_ref }} @@ -329,11 +333,14 @@ jobs: target_devices: ${{ inputs.target_devices }} retention-days: ${{ inputs.retention-days }} - - name: Run compute-benchmarks on SYCL - if: inputs.tests_selector == 'compute-benchmarks' + - name: Run benchmarks + if: inputs.tests_selector == 'benchmarks' uses: ./devops/actions/run-tests/benchmark with: target_devices: ${{ inputs.target_devices }} + upload_results: ${{ inputs.benchmark_upload_results }} + save_name: ${{ inputs.benchmark_save_name }} + preset: ${{ inputs.benchmark_preset }} env: RUNNER_TAG: ${{ inputs.runner }} GITHUB_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }} diff --git a/.github/workflows/sycl-rel-nightly.yml b/.github/workflows/sycl-rel-nightly.yml index 3c37796e749f7..76542ad12ffbb 100644 --- a/.github/workflows/sycl-rel-nightly.yml +++ b/.github/workflows/sycl-rel-nightly.yml @@ -2,6 +2,12 @@ name: SYCL Release Branch Nightly on: workflow_dispatch: + inputs: + testing_branch: + description: | + Branch to test, e.g. sycl-rel-6_0_0. + If nothing is specified, the last release branch is used. 
+ schedule: - cron: '0 3 * * *' @@ -20,7 +26,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - ref: sycl-rel-6_0_0 + ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} - run: git show --quiet | tee -a $GITHUB_STEP_SUMMARY - id: is_new_commit @@ -38,9 +44,10 @@ jobs: with: build_cache_root: "/__w/" build_artifact_suffix: default - build_configure_extra_args: '--hip --cuda' + build_configure_extra_args: '--disable-jit --no-assertions --add_security_flags=sanitize --hip --cuda' build_image: ghcr.io/intel/llvm/ubuntu2204_build:latest - build_ref: sycl-rel-6_0_0 + build_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} + pack_release: 'true' # We upload the build for people to download/use, override its name and # prefer widespread gzip compression. @@ -57,39 +64,37 @@ jobs: runner: '["Linux", "amdgpu"]' image_options: -u 1001 --device=/dev/dri --device=/dev/kfd target_devices: hip:gpu - tests_selector: e2e - - name: Intel L0 GPU + - name: Intel L0 Gen12 GPU runner: '["Linux", "gen12"]' image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: level_zero:gpu - reset_intel_gpu: true - tests_selector: e2e extra_lit_opts: --param gpu-intel-gen12=True - - name: Intel OCL GPU + - name: Intel L0 Battlemage GPU + runner: '["Linux", "bmg"]' + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: level_zero:gpu + + - name: Intel OCL Gen12 GPU runner: '["Linux", "gen12"]' image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: opencl:gpu - reset_intel_gpu: true - tests_selector: e2e extra_lit_opts: --param gpu-intel-gen12=True - name: Intel OCL CPU runner: '["Linux", "gen12"]' image_options: -u 1001 --privileged --cap-add SYS_ADMIN target_devices: opencl:cpu - tests_selector: e2e uses: ./.github/workflows/sycl-linux-run-tests.yml with: name: ${{ matrix.name }} runner: ${{ matrix.runner }} image_options: ${{ matrix.image_options }} target_devices: ${{ matrix.target_devices }} - tests_selector: ${{ matrix.tests_selector }} + tests_selector: e2e extra_lit_opts: ${{ matrix.extra_lit_opts }} - reset_intel_gpu: ${{ matrix.reset_intel_gpu }} - repo_ref: sycl-rel-6_0_0 + repo_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} devops_ref: sycl sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} @@ -100,7 +105,9 @@ jobs: if: ${{ github.repository == 'intel/llvm' && needs.check_for_new_commits.outputs.is_new_commit != 'false' }} uses: ./.github/workflows/sycl-windows-build.yml with: - ref: sycl-rel-6_0_0 + ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} + build_configure_extra_args: '--disable-jit --no-assertions --add_security_flags=sanitize' + pack_release: 'true' # We upload both Linux/Windows build via Github's "Releases" # functionality, make sure Linux/Windows names follow the same pattern. @@ -113,13 +120,24 @@ jobs: always() && !cancelled() && needs.build-win.outputs.build_conclusion == 'success' + strategy: + fail-fast: false + matrix: + include: + - name: Intel GEN12 Graphics with Level Zero + runner: '["Windows","gen12"]' + # 6_1_0 doesn't support the auto-detection, keep it until 6_2_0. 
+ extra_lit_opts: --param gpu-intel-gen12=True + - name: Intel Battlemage Graphics with Level Zero + runner: '["Windows","bmg"]' uses: ./.github/workflows/sycl-windows-run-tests.yml with: - name: Intel GEN12 Graphics with Level Zero - runner: '["Windows","gen12"]' + name: ${{ matrix.name }} + runner: ${{ matrix.runner }} + target_devices: level_zero:gpu sycl_toolchain_archive: ${{ needs.build-win.outputs.artifact_archive_name }} - extra_lit_opts: --param gpu-intel-gen12=True - ref: sycl-rel-6_0_0 + extra_lit_opts: ${{ matrix.extra_lit_opts }} + repo_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} devops_ref: sycl cuda-aws-start: @@ -129,7 +147,7 @@ jobs: secrets: inherit with: mode: start - ref: sycl-rel-6_0_0 + ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} cuda-run-tests: needs: [ubuntu2204_build, cuda-aws-start] @@ -141,7 +159,7 @@ jobs: image: ghcr.io/intel/llvm/ubuntu2204_build:latest image_options: -u 1001 --gpus all --cap-add SYS_ADMIN --env NVIDIA_DISABLE_REQUIRE=1 target_devices: cuda:gpu - repo_ref: sycl-rel-6_0_0 + repo_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} devops_ref: sycl sycl_toolchain_artifact: sycl_linux_default @@ -150,12 +168,12 @@ jobs: cuda-aws-stop: needs: [cuda-aws-start, cuda-run-tests] - if: always() + if: always() && ${{ needs.cuda-aws-start.result != 'skipped' }} uses: ./.github/workflows/sycl-aws.yml secrets: inherit with: mode: stop - ref: sycl-rel-6_0_0 + ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} build-sycl-cts: needs: ubuntu2204_build @@ -167,11 +185,13 @@ jobs: cts_testing_mode: 'build-only' image_options: -u 1001 --device=/dev/dri --privileged --cap-add SYS_ADMIN tests_selector: cts - repo_ref: sycl-rel-6_0_0 + repo_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} devops_ref: sycl + tests_ref: 87f8677c4b67cfe56ad6f09246036f10c306c977 sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + sycl_cts_artifact: sycl_cts_bin_linux run-sycl-cts: needs: [ubuntu2204_build, build-sycl-cts] @@ -197,9 +217,25 @@ jobs: image_options: ${{ matrix.image_options }} target_devices: ${{ matrix.target_devices }} tests_selector: cts - repo_ref: ${{ github.sha }} + repo_ref: ${{ inputs.testing_branch || 'sycl-rel-6_2' }} devops_ref: sycl sycl_toolchain_artifact: sycl_linux_default sycl_toolchain_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} sycl_toolchain_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} - sycl_cts_artifact: sycl_cts_bin + sycl_cts_artifact: sycl_cts_bin_linux + + hardening-check: + needs: [ubuntu2204_build, build-win] + if: | + always() + && !cancelled() + && needs.ubuntu2204_build.outputs.build_conclusion == 'success' + && needs.build-win.outputs.build_conclusion == 'success' + uses: ./.github/workflows/sycl-hardening-check.yml + with: + sycl_linux_artifact: sycl_linux_release + sycl_linux_archive: ${{ needs.ubuntu2204_build.outputs.artifact_archive_name }} + sycl_linux_decompress_command: ${{ needs.ubuntu2204_build.outputs.artifact_decompress_command }} + + sycl_windows_artifact: sycl_windows_release + sycl_windows_archive: ${{ needs.build-win.outputs.artifact_archive_name }} diff --git a/.github/workflows/sycl-windows-build.yml b/.github/workflows/sycl-windows-build.yml index dbf4bfd88922c..50a736c6ef067 100644 --- a/.github/workflows/sycl-windows-build.yml +++ 
b/.github/workflows/sycl-windows-build.yml @@ -10,6 +10,10 @@ on: build_configure_extra_args: type: string required: false + build_target: + type: string + required: false + default: sycl-toolchain changes: type: string description: 'Filter matches for the changed files in the PR' @@ -25,10 +29,16 @@ on: description: 'Artifacts retention period' type: string default: 3 - compiler: + e2e_binaries_artifact: + type: string + required: false + cxx: type: string required: false default: "cl" + pack_release: + type: string + required: false outputs: build_conclusion: @@ -52,6 +62,11 @@ on: build_configure_extra_args: type: string required: false + build_target: + type: choice + options: + - "sycl-toolchain" + - "all" artifact_archive_name: type: choice options: @@ -61,11 +76,14 @@ on: type: choice options: - 3 - compiler: + cxx: type: choice options: - cl - icx + pack_release: + type: string + required: false permissions: read-all @@ -80,9 +98,8 @@ jobs: - name: Detect hung tests if: always() shell: powershell - continue-on-error: true run: | - Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" -Headers @{Authorization = "Bearer ${{ github.token }}"} powershell.exe -File windows_detect_hung_tests.ps1 $exitCode = $LASTEXITCODE Remove-Item -Path "windows_detect_hung_tests.ps1" @@ -96,7 +113,7 @@ jobs: arch: amd64 - name: Setup oneAPI env uses: ./devops/actions/setup_windows_oneapi_env - if: ${{ always() && !cancelled() && inputs.compiler == 'icx' }} + if: ${{ always() && !cancelled() && inputs.cxx == 'icx' }} - name: Set env run: | git config --system core.longpaths true @@ -124,8 +141,8 @@ jobs: IF NOT EXIST D:\github\_work\cache\${{inputs.build_cache_suffix}} MKDIR D:\github\_work\cache\${{inputs.build_cache_suffix}} python.exe src/buildbot/configure.py -o build ^ --ci-defaults %ARGS% ^ - "-DCMAKE_C_COMPILER=${{inputs.compiler}}" ^ - "-DCMAKE_CXX_COMPILER=${{inputs.compiler}}" ^ + "-DCMAKE_C_COMPILER=${{inputs.cxx}}" ^ + "-DCMAKE_CXX_COMPILER=${{inputs.cxx}}" ^ "-DCMAKE_INSTALL_PREFIX=%GITHUB_WORKSPACE%\install" ^ -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ^ -DCMAKE_C_COMPILER_LAUNCHER=ccache ^ @@ -135,7 +152,7 @@ jobs: id: build shell: bash run: | - cmake --build build --target sycl-toolchain + cmake --build build --target ${{ inputs.build_target }} - name: check-llvm if: always() && !cancelled() && contains(inputs.changes, 'llvm') shell: bash @@ -179,12 +196,27 @@ jobs: name: sycl_windows_abi_symbols path: build/new_sycl_symbols_windows.dump retention-days: ${{ inputs.retention-days }} - - name: Install + - name: Install sycl-toolchain + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} + run: | + cmake --build build --target deploy-sycl-toolchain + + - name: Pack toolchain release + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' && inputs.pack_release == 'true' }} + run: tar -czf ${{ inputs.artifact_archive_name }} -C install . 
+ - name: Upload toolchain release + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' && inputs.pack_release == 'true' }} + uses: actions/upload-artifact@v4 + with: + name: sycl_windows_release + path: ${{ inputs.artifact_archive_name }} + retention-days: ${{ inputs.retention-days }} + + - name: Install utilities if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} shell: bash # TODO replace utility installation with a single CMake target run: | - cmake --build build --target deploy-sycl-toolchain cmake --build build --target utils/FileCheck/install cmake --build build --target utils/count/install cmake --build build --target utils/not/install @@ -206,11 +238,29 @@ jobs: name: sycl_windows_default path: ${{ inputs.artifact_archive_name }} retention-days: ${{ inputs.retention-days }} + + - name: Setup SYCL toolchain + if: ${{ always() && !cancelled() && steps.build.conclusion == 'success' }} + run: | + echo "PATH=$env:GITHUB_WORKSPACE\\install\\bin;$env:PATH" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "LIB=$env:GITHUB_WORKSPACE\\install\\lib;$env:LIB" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + + - name: Build E2E tests + if: ${{ inputs.e2e_binaries_artifact && always() && !cancelled() && steps.build.conclusion == 'success' }} + uses: ./devops/actions/run-tests/windows/e2e + with: + ref: ${{ inputs.ref || github.sha }} + testing_mode: build-only + target_devices: all + binaries_artifact: ${{ inputs.e2e_binaries_artifact }} + extra_lit_opts: --param sycl_build_targets="spir" + cxx: ${{ inputs.cxx }} + - name: Detect hung tests if: always() shell: powershell run: | - Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" -Headers @{Authorization = "Bearer ${{ github.token }}"} powershell.exe -File windows_detect_hung_tests.ps1 $exitCode = $LASTEXITCODE Remove-Item -Path "windows_detect_hung_tests.ps1" diff --git a/.github/workflows/sycl-windows-run-tests.yml b/.github/workflows/sycl-windows-run-tests.yml index 91b30a4f3bc33..3dc57a80e0634 100644 --- a/.github/workflows/sycl-windows-run-tests.yml +++ b/.github/workflows/sycl-windows-run-tests.yml @@ -6,6 +6,7 @@ on: name: type: string required: True + runner: type: string required: True @@ -27,19 +28,17 @@ on: Extra options to be added to LIT_OPTS. type: string default: '' - ref: + + repo_ref: type: string required: False + description: | + Commit SHA or branch to checkout the intel/llvm repo. devops_ref: type: string required: False description: | - By default we checkout the devops directory from "inputs.ref" branch. - devops_ref may be specified to checkout the devops dir from different - branch. - Note: it doesn't affect ./devops/actions/run-tests/* as these actions - call checkout again and therefore override the devops directory, so - configs/dependecies from input.ref are used. + Commit SHA or branch to checkout the devops directory. 
tests_ref: type: string required: False @@ -59,11 +58,20 @@ on: default: '{}' required: False - compiler: + cxx: type: string required: false default: "cl" + e2e_testing_mode: + type: string + default: "full" + + e2e_binaries_artifact: + type: string + default: '' + required: False + cts_testing_mode: description: | Testing mode to run SYCL-CTS in, can be either `full`, `build-only` @@ -93,9 +101,8 @@ jobs: - name: Detect hung tests if: always() shell: powershell - continue-on-error: true run: | - Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" -Headers @{Authorization = "Bearer ${{ github.token }}"} powershell.exe -File windows_detect_hung_tests.ps1 $exitCode = $LASTEXITCODE Remove-Item -Path "windows_detect_hung_tests.ps1" @@ -104,13 +111,13 @@ jobs: with: sparse-checkout: | devops/actions - ref: ${{ inputs.devops_ref|| inputs.ref || github.sha }} + ref: ${{ inputs.devops_ref|| inputs.repo_ref || github.sha }} - uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 with: arch: amd64 - name: Setup oneAPI env uses: ./devops/actions/setup_windows_oneapi_env - if: ${{ always() && !cancelled() && inputs.compiler == 'icx' }} + if: ${{ always() && !cancelled() && inputs.cxx == 'icx' }} - name: Set env run: | git config --system core.longpaths true @@ -118,12 +125,14 @@ jobs: echo "C:\Program Files\Git\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: Register cleanup after job is finished uses: ./devops/actions/cleanup - - uses: ./devops/actions/cached_checkout + - uses: actions/checkout@v4 if: inputs.tests_selector == 'e2e' with: path: llvm - ref: ${{ inputs.ref || github.sha }} - cache_path: "D:\\\\github\\\\_work\\\\repo_cache\\\\" + ref: ${{ inputs.repo_ref || github.sha }} + sparse-checkout: | + llvm/utils/lit + sycl/test-e2e - name: Download compiler toolchain uses: actions/download-artifact@v4 with: @@ -142,22 +151,20 @@ jobs: sycl-ls - run: | sycl-ls --verbose - - name: Configure E2E with Level Zero target - if: inputs.tests_selector == 'e2e' - shell: cmd - run: | - mkdir build-e2e - cmake -GNinja -B build-e2e -S.\llvm\sycl\test-e2e -DSYCL_TEST_E2E_TARGETS="level_zero:gpu" -DCMAKE_CXX_COMPILER="clang++" -DLEVEL_ZERO_LIBS_DIR="D:\\github\\level-zero_win-sdk\\lib" -DLEVEL_ZERO_INCLUDE="D:\\github\\level-zero_win-sdk\\include" -DLLVM_LIT="..\\llvm\\llvm\\utils\\lit\\lit.py" - - name: Run End-to-End tests + + - name: Run E2E Tests if: inputs.tests_selector == 'e2e' - shell: bash - run: | - # Run E2E tests. 
- if [[ ${{inputs.compiler}} == 'icx' ]]; then - export LIT_FILTER_OUT="compile_on_win_with_mdd" - fi - export LIT_OPTS="-v --no-progress-bar --show-unsupported --show-pass --show-xfail --max-time 3600 --time-tests ${{ inputs.extra_lit_opts }}" - cmake --build build-e2e --target check-sycl-e2e + uses: ./devops/actions/run-tests/windows/e2e + timeout-minutes: 60 + with: + ref: ${{ inputs.tests_ref || inputs.repo_ref || github.sha }} + binaries_artifact: ${{ inputs.e2e_binaries_artifact }} + testing_mode: ${{ inputs.e2e_testing_mode }} + extra_cmake_args: ${{ inputs.extra_cmake_args }} + target_devices: ${{ inputs.target_devices }} + extra_lit_opts: ${{ inputs.extra_lit_opts }} + retention-days: ${{ inputs.artifact_retention_days }} + cxx: ${{ inputs.cxx }} - name: Run SYCL CTS Tests if: inputs.tests_selector == 'cts' @@ -175,7 +182,7 @@ jobs: if: always() shell: powershell run: | - Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" + Invoke-WebRequest -Uri "https://raw.githubusercontent.com/intel/llvm/refs/heads/sycl/devops/scripts/windows_detect_hung_tests.ps1" -OutFile "windows_detect_hung_tests.ps1" -Headers @{Authorization = "Bearer ${{ github.token }}"} powershell.exe -File windows_detect_hung_tests.ps1 $exitCode = $LASTEXITCODE Remove-Item -Path "windows_detect_hung_tests.ps1" diff --git a/devops/actions/benchmarking/aggregate/action.yml b/devops/actions/benchmarking/aggregate/action.yml deleted file mode 100644 index c062636684b1f..0000000000000 --- a/devops/actions/benchmarking/aggregate/action.yml +++ /dev/null @@ -1,95 +0,0 @@ -name: 'Aggregate compute-benchmark results and produce historical averages' - -# The benchmarking workflow in sycl-linux-run-tests.yml passes or fails based on -# how the benchmark results compare to a historical average: This historical -# average is calculated in this composite workflow, which aggregates historical -# data and produces measures of central tendency (median in this case) used for -# this purpose. -# -# This action assumes that /devops has been checked out in ./devops. This action -# also assumes that GITHUB_TOKEN was properly set in env, because according to -# Github, that's apparently the recommended way to pass a secret into a github -# action: -# -# https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets -# - -inputs: - lookback_days: - type: number - required: true - -runs: - using: "composite" - steps: - - name: Obtain oldest timestamp allowed for data in aggregation - shell: bash - run: | - # DO NOT use inputs.lookback_days directly, only use SANITIZED_TIMESTAMP. - SANITIZED_LOOKBACK_DAYS="$(echo '${{ inputs.lookback_days }}' | grep -oE '^[0-9]+$')" - if [ -z "$SANITIZED_LOOKBACK_DAYS" ]; then - echo "Please ensure inputs.lookback_days is a number." - exit 1 - fi - SANITIZED_TIMESTAMP="$(date -d "$SANITIZED_LOOKBACK_DAYS days ago" +%Y%m%d_%H%M%S)" - if [ -z "$(echo "$SANITIZED_TIMESTAMP" | grep -oE '^[0-9]{8}_[0-9]{6}$' )" ]; then - echo "Invalid timestamp generated: is inputs.lookback_days valid?" 
- exit 1 - fi - echo "SANITIZED_TIMESTAMP=$SANITIZED_TIMESTAMP" >> $GITHUB_ENV - - name: Load benchmarking configuration - shell: bash - run: | - $(python ./devops/scripts/benchmarking/load_config.py ./devops constants) - echo "SANITIZED_PERF_RES_GIT_REPO=$SANITIZED_PERF_RES_GIT_REPO" >> $GITHUB_ENV - echo "SANITIZED_PERF_RES_GIT_BRANCH=$SANITIZED_PERF_RES_GIT_BRANCH" >> $GITHUB_ENV - - name: Checkout historical performance results repository - shell: bash - run: | - if [ ! -d ./llvm-ci-perf-results ]; then - git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results - fi - - name: Run aggregator on historical results - shell: bash - run: | - # The current format of the historical results respository is: - # - # /// - # - # Thus, a min/max depth of 3 is used to enumerate all test cases in the - # repository. Test name is also derived from here. - find ./llvm-ci-perf-results -mindepth 3 -maxdepth 3 -type d ! -path '*.git*' | - while read -r dir; do - test_name="$(basename "$dir")" - python ./devops/scripts/benchmarking/aggregate.py ./devops "$test_name" "$dir" "$SANITIZED_TIMESTAMP" - done - - name: Upload average to the repo - shell: bash - run: | - cd ./llvm-ci-perf-results - git config user.name "SYCL Benchmarking Bot" - git config user.email "sys_sycl_benchmarks@intel.com" - git pull - # Make sure changes have been made - if git diff --quiet && git diff --cached --quiet; then - echo "No changes to median, skipping push." - else - git add . - git commit -m "[GHA] Aggregate median data from $SANITIZED_TIMESTAMP to $(date +%Y%m%d_%H%M%S)" - git push "https://$GITHUB_TOKEN@github.com/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH" - fi - - name: Find aggregated average results artifact here - if: always() - shell: bash - run: | - cat << EOF - # - # Artifact link for aggregated averages here: - # - EOF - - name: Archive new medians - if: always() - uses: actions/upload-artifact@v4 - with: - name: llvm-ci-perf-results new medians - path: ./llvm-ci-perf-results/**/*-median.csv diff --git a/devops/actions/cached_checkout/action.yml b/devops/actions/cached_checkout/action.yml index 6f58f1de825bf..9e492f902e27f 100644 --- a/devops/actions/cached_checkout/action.yml +++ b/devops/actions/cached_checkout/action.yml @@ -31,7 +31,7 @@ runs: mkdir -p ${{ inputs.cache_path }}/${{ inputs.repository }} cd ${{ inputs.cache_path }}/${{ inputs.repository }} if [ -d ./.git ]; then - git pull --prune + git pull --prune --rebase else git clone https://github.com/${{ inputs.repository }}.git . git gc diff --git a/devops/actions/reset_gpu/action.yml b/devops/actions/reset_gpu/action.yml new file mode 100644 index 0000000000000..7fca091828385 --- /dev/null +++ b/devops/actions/reset_gpu/action.yml @@ -0,0 +1,32 @@ +name: Reset Intel GPU + +runs: + using: "composite" + steps: + - name: Reset Intel GPU + shell: bash + run: | + # First reset all xe devices. + driver_path="/sys/bus/pci/drivers/xe" + + if [ -d "$driver_path" ]; then + # Extract PCI paths of devices bound to xe + for device in $(ls "$driver_path" | grep -E '^[0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}\.[0-9]$'); do + sysfs_path="/sys/bus/pci/devices/$device" + sudo bash -c 'echo $0 > $1/unbind' $device $driver_path + sudo bash -c 'echo 1 > $0/reset' $sysfs_path + sudo bash -c 'echo $0 > $1/bind' $device $driver_path + echo "Reset $device" + done + fi + + # Then reset all i915 devices. We don't do a PCI FLR because + # it seems to fail on some older GPUs. 
+ sudo mount -t debugfs none /sys/kernel/debug || true + dirs=$(sudo bash -c 'ls -d /sys/kernel/debug/dri/*') || true + for dir in $dirs; do + if sudo test -e "${dir}/i915_wedged"; then + sudo bash -c 'echo 1 > ${dir}/i915_wedged' $dir + echo "Reset $(basename $dir)" + fi + done diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml index e357e2bddec30..8062ccdbcaea0 100644 --- a/devops/actions/run-tests/benchmark/action.yml +++ b/devops/actions/run-tests/benchmark/action.yml @@ -1,24 +1,30 @@ -name: 'Run compute-benchmarks' - -# Run compute-benchmarks on SYCL -# -# This action assumes SYCL is in ./toolchain, and that /devops has been -# checked out in ./devops. This action also assumes that GITHUB_TOKEN -# was properly set in env, because according to Github, that's apparently the -# recommended way to pass a secret into a github action: -# -# https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets +name: 'Run benchmarks' + +# This action assumes the following prerequisites: # -# This action also expects a RUNNER_TAG environment variable to be set to the -# runner tag used to run this workflow: Currently, only gen12 and pvc on Linux -# are fully supported. Although this workflow won't stop you from running other -# devices, note that only gen12 and pvc has been tested to work. +# - SYCL is placed in ./toolchain -- TODO change this +# - /devops has been checked out in ./devops. +# - env.GITHUB_TOKEN was properly set, because according to Github, that's +# apparently the recommended way to pass a secret into a github action: + +# https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#accessing-your-secrets # +# - env.RUNNER_TAG set to the runner tag used to run this workflow: Currently, +# only specific runners are fully supported. inputs: target_devices: type: string required: True + upload_results: + type: string + required: True + save_name: + type: string + required: True + preset: + type: string + required: True runs: using: "composite" @@ -27,30 +33,44 @@ runs: shell: bash env: TARGET_DEVICE: ${{ inputs.target_devices }} + PRESET: ${{ inputs.preset }} run: | case "$RUNNER_TAG" in - '["Linux", "gen12"]' | '["Linux", "pvc"]') ;; + '["PVC_PERF"]' ) ;; + '["BMG_PERF"]' ) ;; *) echo "#" - echo "# WARNING: Only gen12/pvc on Linux is fully supported." + echo "# WARNING: Only specific tuned runners are fully supported." echo "# This workflow is not guaranteed to work with other runners." echo "#" ;; esac + # Ensure runner name has nothing injected + # TODO: in terms of security, is this overkill? + if [ -z "$(printf '%s' "$RUNNER_NAME" | grep -oE '^[a-zA-Z0-9_-]+$')" ]; then + echo "Bad runner name, please ensure runner name is [a-zA-Z0-9_-]." + exit 1 + fi + # input.target_devices is not directly used, as this allows code injection case "$TARGET_DEVICE" in level_zero:*) ;; + level_zero_v2:*) ;; *) echo "#" echo "# WARNING: Only level_zero backend is fully supported." echo "# This workflow is not guaranteed to work with other backends." echo "#" ;; esac + echo "ONEAPI_DEVICE_SELECTOR=$TARGET_DEVICE" >> $GITHUB_ENV + + # Make sure specified preset is a known value and is not malicious + python3 ./devops/scripts/benchmarks/presets.py query "$PRESET" + [ "$?" 
-ne 0 ] && exit 1 # Stop workflow if invalid preset + echo "PRESET=$PRESET" >> $GITHUB_ENV - name: Compute CPU core range to run benchmarks on shell: bash run: | - # Taken from ur-benchmark-reusable.yml: - # Compute the core range for the first NUMA node; second node is used by # UMF. Skip the first 4 cores as the kernel is likely to schedule more # work on these. @@ -67,62 +87,143 @@ runs: ZE_AFFINITY_MASK=0 echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV + - name: Checkout results repo + shell: bash + run: | + git clone -b unify-ci https://github.com/intel/llvm-ci-perf-results - name: Run compute-benchmarks + env: + # Need to append "__" to save name in order to follow + # conventions: + SAVE_PREFIX: ${{ inputs.save_name }} shell: bash run: | - cat << EOF - # - # NOTE TO DEVELOPERS: - # - - Check latter steps of the workflow: This job produces an artifact with: - - benchmark results from passing/failing tests - - log containing all failing (too slow) benchmarks - - log containing all erroring benchmarks - - While this step in the workflow provides debugging output describing this - information, it might be easier to inspect the logs from the artifact - instead. - - EOF - export ONEAPI_DEVICE_SELECTOR="${{ inputs.target_devices }}" + # TODO generate summary + display helpful message here export CMPLR_ROOT=./toolchain echo "-----" + # Using --break-system-packages because: + # - venv is not installed + # - unable to install anything via pip, as python packages in the docker + # container are managed by apt + # - apt is unable to install anything due to unresolved dpkg dependencies, + # as a result of how the sycl nightly images are created + pip install --user --break-system-packages -r ./devops/scripts/benchmarks/requirements.txt + echo "-----" + + # By default, the benchmark scripts forceload level_zero + FORCELOAD_ADAPTER="${ONEAPI_DEVICE_SELECTOR%%:*}" + echo "Adapter: $FORCELOAD_ADAPTER" + + case "$ONEAPI_DEVICE_SELECTOR" in + level_zero:*) SAVE_SUFFIX="L0" ;; + level_zero_v2:*) + SAVE_SUFFIX="L0v2" + export ONEAPI_DEVICE_SELECTOR="level_zero:gpu" # "level_zero_v2:gpu" not supported anymore + export SYCL_UR_USE_LEVEL_ZERO_V2=1 + ;; + opencl:*) SAVE_SUFFIX="OCL" ;; + *) SAVE_SUFFIX="${ONEAPI_DEVICE_SELECTOR%%:*}";; + esac + case "$RUNNER_TAG" in + '["PVC_PERF"]') MACHINE_TYPE="PVC" ;; + '["BMG_PERF"]') MACHINE_TYPE="BMG" ;; + # Best effort at matching + *) + MACHINE_TYPE="${RUNNER_TAG#[\"}" + MACHINE_TYPE="${MACHINE_TYPE%_PERF=\"]}" + ;; + esac + SAVE_NAME="${SAVE_PREFIX}_${MACHINE_TYPE}_${SAVE_SUFFIX}" + echo "SAVE_NAME=$SAVE_NAME" >> $GITHUB_ENV + SAVE_TIMESTAMP="$(date -u +'%Y%m%d_%H%M%S')" # Timestamps are in UTC time + + # Cache the compute_runtime version from dependencies.json, but perform a + # check with L0 version before using it: This value is not guaranteed to + # accurately reflect the current compute_runtime version used, as the + # docker images are built nightly. 
+ export COMPUTE_RUNTIME_TAG_CACHE="$(cat ./devops/dependencies.json | jq -r .linux.compute_runtime.github_tag)" + sycl-ls echo "-----" - taskset -c "$CORES" ./devops/scripts/benchmarking/benchmark.sh -n '${{ runner.name }}' -s || exit 1 - - name: Push compute-benchmarks results + + taskset -c "$CORES" ./devops/scripts/benchmarks/main.py \ + "$(realpath ./llvm_test_workdir)" \ + --sycl "$(realpath ./toolchain)" \ + --adapter "$FORCELOAD_ADAPTER" \ + --save "$SAVE_NAME" \ + --output-html remote \ + --results-dir "./llvm-ci-perf-results/" \ + --output-dir "./llvm-ci-perf-results/" \ + --preset "$PRESET" \ + --timestamp-override "$SAVE_TIMESTAMP" \ + --detect-version sycl,compute_runtime + echo "-----" + python3 ./devops/scripts/benchmarks/compare.py to_hist \ + --name "$SAVE_NAME" \ + --compare-file "./llvm-ci-perf-results/results/${SAVE_NAME}_${SAVE_TIMESTAMP}.json" \ + --results-dir "./llvm-ci-perf-results/results/" \ + --regression-filter '^[a-z_]+_sycl ' + echo "-----" + + - name: Cache changes to benchmark folder for archival purposes if: always() shell: bash + run: | + cd "./llvm-ci-perf-results" + git add . + for diff in $(git diff HEAD --name-only); do + mkdir -p "../cached_changes/$(dirname $diff)" + cp "$diff" "../cached_changes/$diff" + done + - name: Push benchmarks results + if: inputs.upload_results == 'true' && always() + shell: bash run: | - # TODO -- waiting on security clearance - # Load configuration values - $(python ./devops/scripts/benchmarking/load_config.py ./devops constants) - cd "./llvm-ci-perf-results" git config user.name "SYCL Benchmarking Bot" git config user.email "sys_sycl_benchmarks@intel.com" - git pull - git add . - # Make sure changes have been made + results_branch="unify-ci" + if git diff --quiet && git diff --cached --quiet; then echo "No new results added, skipping push." - else - git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}" - git push "https://$GITHUB_TOKEN@github.com/$SANITIZED_PERF_RES_GIT_REPO.git" "$SANITIZED_PERF_RES_GIT_BRANCH" + exit 0 fi - - name: Find benchmark result artifact here - if: always() - shell: bash - run: | - cat << EOF - # - # Artifact link for benchmark results here: - # - EOF - - name: Archive compute-benchmark results + + for attempt in 1 2 3; do + echo "Attempt $attempt to push new results" + git add . + git commit -m "[GHA] Upload compute-benchmarks results from https://github.com/intel/llvm/actions/runs/${{ github.run_id }}" + results_file="$(git diff HEAD~1 --name-only -- results/ | head -n 1)" + + if git push "https://$GITHUB_TOKEN@github.com/intel/llvm-ci-perf-results.git" "$results_branch"; then + echo "Push succeeded" + break + fi + + echo "Push failed, retrying..." + if [ -n "$results_file" ]; then + cached_result="$(mktemp -d)/$(basename $results_file)" + mv "$results_file" "$cached_result" + + git reset --hard "origin/$results_branch" + git pull origin "$results_branch" + + mv "$cached_result" "$results_file" + fi + + echo "Regenerating data.json..." 
+ cd ../ + ./devops/scripts/benchmarks/main.py \ + "$(realpath ./llvm_test_workdir)" \ + --output-html remote \ + --results-dir "./llvm-ci-perf-results/" \ + --output-dir "./llvm-ci-perf-results/" \ + --dry-run + cd - + done + - name: Archive benchmark results if: always() uses: actions/upload-artifact@v4 with: - name: Compute-benchmark run ${{ github.run_id }} (${{ runner.name }}) - path: ./artifact + name: Benchmark run ${{ github.run_id }} (${{ env.SAVE_NAME }}) + path: ./cached_changes diff --git a/devops/actions/run-tests/cts/action.yml b/devops/actions/run-tests/cts/action.yml index 93fd2770e166d..e70c4d292576b 100644 --- a/devops/actions/run-tests/cts/action.yml +++ b/devops/actions/run-tests/cts/action.yml @@ -67,7 +67,7 @@ runs: # SYCL_CTS_EXCLUDE_TEST_CATEGORIES - Optional file specifying a list # of test categories to be excluded from the build. echo "::group::Excluded test categories" - cat $cts_exclude_filter + [ -f "$cts_exclude_filter" ] && cat "$cts_exclude_filter" echo "::endgroup::" fi @@ -134,11 +134,13 @@ runs: cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU fi - while IFS= read -r line; do - if [[ $line != \#* ]]; then - rm "./build-cts/bin/test_$line" - fi - done < "$cts_exclude_filter" + if [ -f "$cts_exclude_filter" ]; then + while IFS= read -r line; do + if [[ $line != \#* ]]; then + rm "./build-cts/bin/test_$line" + fi + done < "$cts_exclude_filter" + fi - name: Run SYCL CTS tests # Proceed with execution even if the previous two steps did not succeed. diff --git a/devops/actions/run-tests/e2e/action.yml b/devops/actions/run-tests/e2e/action.yml index 47fc75599ccdb..6baacc86451c0 100644 --- a/devops/actions/run-tests/e2e/action.yml +++ b/devops/actions/run-tests/e2e/action.yml @@ -10,12 +10,13 @@ inputs: extra_cmake_args: required: false target_devices: - required: true + required: false + default: all extra_lit_opts: required: false retention-days: required: false - cxx_compiler: + sycl_compiler: required: false @@ -23,11 +24,13 @@ runs: using: "composite" steps: - name: Checkout E2E tests - uses: ./devops/actions/cached_checkout + uses: actions/checkout@v4 with: path: llvm ref: ${{ inputs.ref || github.sha }} - cache_path: "/__w/repo_cache/" + sparse-checkout: | + llvm/utils/lit + sycl/test-e2e - name: Download E2E Binaries if: inputs.testing_mode == 'run-only' @@ -55,7 +58,7 @@ runs: if: inputs.testing_mode != 'run-only' shell: bash run: | - cmake -GNinja -B./build-e2e -S./llvm/sycl/test-e2e -DCMAKE_CXX_COMPILER="${{ inputs.cxx_compiler || '$(which clang++)'}}" -DLLVM_LIT="$PWD/llvm/llvm/utils/lit/lit.py" ${{ steps.cmake_opts.outputs.opts }} + cmake -GNinja -B./build-e2e -S./llvm/sycl/test-e2e -DCMAKE_CXX_COMPILER="${{ inputs.sycl_compiler || '$(which clang++)'}}" -DLLVM_LIT="$PWD/llvm/llvm/utils/lit/lit.py" ${{ steps.cmake_opts.outputs.opts }} - name: SYCL End-to-end tests shell: bash {0} env: @@ -65,6 +68,7 @@ runs: exit_code=$? cat e2e.log if [ $exit_code -ne 0 ]; then + # This is duplicated between lin/win, updates must change both. 
awk '/^Failed Tests|Unexpectedly Passed Tests|Unresolved tests|Timed Out Tests|Testing Time/{flag=1}/FAILED: CMakeFiles/{flag=0}flag' e2e.log >> $GITHUB_STEP_SUMMARY fi exit $exit_code diff --git a/devops/actions/run-tests/windows/cts/action.yml b/devops/actions/run-tests/windows/cts/action.yml index 4d4d7aa7f9f46..7404e09ca2bd2 100644 --- a/devops/actions/run-tests/windows/cts/action.yml +++ b/devops/actions/run-tests/windows/cts/action.yml @@ -67,7 +67,7 @@ runs: # SYCL_CTS_EXCLUDE_TEST_CATEGORIES - Optional file specifying a list # of test categories to be excluded from the build. echo "::group::Excluded test categories" - cat $cts_exclude_filter + [ -f "$cts_exclude_filter" ] && cat "$cts_exclude_filter" echo "::endgroup::" fi @@ -136,11 +136,13 @@ runs: cts_exclude_filter=$PWD/sycl/cts_exclude_filter/L0_GPU fi - while IFS= read -r line; do - if [[ $line != \#* ]]; then - rm "./build-cts/bin/test_$line" - fi - done < "$cts_exclude_filter" + if [ -f "$cts_exclude_filter" ]; then + while IFS= read -r line; do + if [[ $line != \#* ]]; then + rm "./build-cts/bin/test_$line" + fi + done < "$cts_exclude_filter" + fi - name: Run SYCL CTS tests # Proceed with execution even if the previous two steps did not succeed. diff --git a/devops/actions/run-tests/windows/e2e/action.yml b/devops/actions/run-tests/windows/e2e/action.yml new file mode 100644 index 0000000000000..3bfd75bde3784 --- /dev/null +++ b/devops/actions/run-tests/windows/e2e/action.yml @@ -0,0 +1,112 @@ +name: 'Run SYCL E2E tests on Windows' + +inputs: + ref: + required: false + binaries_artifact: + required: false + testing_mode: + required: true + extra_cmake_args: + required: false + target_devices: + required: false + default: all + extra_lit_opts: + required: false + retention-days: + required: false + sycl_compiler: + required: false + cxx: + required: false + default: "cl" + +runs: + using: "composite" + steps: + - name: Checkout E2E tests + uses: actions/checkout@v4 + with: + path: llvm + ref: ${{ inputs.ref || github.sha }} + sparse-checkout: | + llvm/utils/lit + sycl/test-e2e + - name: Download E2E Binaries + if: inputs.testing_mode == 'run-only' + uses: actions/download-artifact@v4 + with: + name: ${{ inputs.binaries_artifact }} + - name: Extract E2E Binaries + if: inputs.testing_mode == 'run-only' + shell: bash + run: | + mkdir build-e2e + tar -xf e2e_bin.tar.gz -C build-e2e + + - name: Deduce E2E CMake options + id: cmake_opts + shell: bash + env: + CMAKE_EXTRA_ARGS: ${{ inputs.extra_cmake_args }} + run: | + if [ -n "$CMAKE_EXTRA_ARGS" ]; then + echo "opts=$CMAKE_EXTRA_ARGS" >> $GITHUB_OUTPUT + fi + + - name: Make E2E folder + if: inputs.testing_mode != 'run-only' + shell: bash + run: | + mkdir build-e2e + + - name: Configure E2E tests + shell: bash + run: | + cmake -GNinja -B build-e2e -S./llvm/sycl/test-e2e -DCMAKE_CXX_COMPILER="${{ inputs.sycl_compiler || '$(which clang++).exe' }}" -DLEVEL_ZERO_LIBS_DIR="D:\\github\\level-zero_win-sdk\\lib" -DLEVEL_ZERO_INCLUDE="D:\\github\\level-zero_win-sdk\\include" -DLLVM_LIT="..\\llvm\\llvm\\utils\\lit\\lit.py" ${{ steps.cmake_opts.outputs.opts }} + + - name: Keep track of files after configuring E2E step + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + shell: bash + run: ls build-e2e > e2econf_files.txt + + - name: Run End-to-End tests + shell: bash {0} + env: + LIT_OPTS: -v --no-progress-bar --show-unsupported --show-pass --show-xfail --max-time ${{ inputs.e2e_testing_mode == 'run-only' && 1200 || 3600 }} 
--time-tests --param print_features=True --param test-mode=${{ inputs.testing_mode }} --param sycl_devices=${{ inputs.target_devices }} ${{ inputs.extra_lit_opts }} + run: | + # Run E2E tests. + cmake --build build-e2e --target check-sycl-e2e > e2e.log 2>&1 + + exit_code=$? + cat e2e.log + if [ $exit_code -ne 0 ]; then + # This is duplicated between lin/win, updates must change both. + awk '/^Failed Tests|Unexpectedly Passed Tests|Unresolved tests|Timed Out Tests|Testing Time/{flag=1}/FAILED: CMakeFiles/{flag=0}flag' e2e.log >> $GITHUB_STEP_SUMMARY + fi + exit $exit_code + + # Github CI doesn't support containers on Windows, so we cannot guarantee + # that paths are the same between building and running systems. To avoid + # CMake issues related to absolute paths we reconfigure the build-e2e + # folder on the run system. + - name: Remove E2E configuration files + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + shell: bash + run: | + for FILE in $(cat e2econf_files.txt); do rm -r build-e2e/$FILE; done + rm e2econf_files.txt + + - name: Pack E2E test binaries + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + shell: bash + run: | + tar -czf e2e_bin.tar.gz -C build-e2e . + - name: Upload E2E test binaries + if: ${{ always() && !cancelled() && inputs.binaries_artifact != '' && inputs.testing_mode != 'run-only'}} + uses: actions/upload-artifact@v4 + with: + name: ${{ inputs.binaries_artifact }} + path: e2e_bin.tar.gz + retention-days: ${{ inputs.retention-days }} diff --git a/devops/benchmarking/config.ini b/devops/benchmarking/config.ini deleted file mode 100644 index 988d1d9f08af9..0000000000000 --- a/devops/benchmarking/config.ini +++ /dev/null @@ -1,44 +0,0 @@ -; -; This file contains configuration options to change the behaviour of the -; benchmarking workflow in sycl-linux-run-tests.yml. -; -; DO NOT USE THE CONTENTS OF THIS FILE DIRECTLY -- Due to security concerns, The -; contents of this file must be sanitized first before use. -; See: /devops/scripts/benchmarking/common.py -; - -; Compute-benchmark compile/run options -[compute_bench] -; Value for -j during compilation of compute-benchmarks -compile_jobs = 40 -; Number of iterations to run compute-benchmark tests -iterations = 5000 - -; Options for benchmark result metrics (to record/compare against) -[metrics] -; Sets the metrics to record/aggregate in the historical average. -; Format: comma-separated list of column names in compute-benchmark results -recorded = Median,StdDev -; Sets the tolerance for each recorded metric and their allowed deviation from -; the historical average. Metrics not included here are not compared against -; when passing/failing benchmark results. 
-; Format: comma-separated list of : -tolerances = Median:0.08 - -; Options for computing historical averages -[average] -; Number of days (from today) to look back for results when computing historical -; average -cutoff_range = 7 -; Minimum number of samples required to compute a historical average -min_threshold = 10 - -; ONEAPI_DEVICE_SELECTOR linting/options -[device_selector] -; Backends to allow in device_selector -enabled_backends = level_zero,opencl,cuda,hip -; native_cpu is disabled - -; Devices to allow in device_selector -enabled_devices = cpu,gpu -; fpga is disabled diff --git a/devops/benchmarking/constants.ini b/devops/benchmarking/constants.ini deleted file mode 100644 index 9281ece8f4950..0000000000000 --- a/devops/benchmarking/constants.ini +++ /dev/null @@ -1,48 +0,0 @@ -; -; This file defines constants used throughout the benchmarking workflow in -; sycl-linux-run-tests.yml. If you're trying to change the behavior of this -; workflow, you're likely looking for /devops/benchmarking/config.ini instead. -; -; DO NOT USE THE CONTENTS OF THIS FILE DIRECTLY -- Due to security concerns, The -; contents of this file must be sanitized first before use. -; See: /devops/scripts/benchmarking/common.py -; - -; Constants for compute-benchmarks -[compute_bench] -git_repo = intel/compute-benchmarks -git_branch = master -git_commit = 230a3db4d8d03c0e9a663988f7c3abbd1137a1e0 -; path = ./compute-benchmarks - -; Constants for git repo storing benchmark performance results -[perf_res] -git_repo = intel/llvm-ci-perf-results -git_branch = main -; Path to clone performance result repo -; path = ./llvm-ci-perf-results - -; It was decided that paths should be hardcoded throughout this workflow for -; security reasons and ease of readability. Do not use paths as constants. - -; ; Constants for artifacts -; [artifact] -; ; Path to root folder storing benchmark CI artifact -; path = ./artifact -; ; Path (relative to artifact.path) to cache compute-benchmark results -; ; -; ; If a test result does not get moved out of this catch-all cache path, it is -; ; considered to have failed -; output_cache = ./artifact/failed_tests -; ; Path (relative to artifact.path) to cache passing compute-benchmark results -; passing_cache = ./artifact/passing_tests - -; [timestamp] -; ; Timestamp format used for -; format = %%Y%%m%%d_%%H%%M%%S - -; [benchmark_log] -; ; Log file for test cases that perform over the allowed variance -; slow = ./artifact/benchmarks_failed.log -; ; Log file for test cases that errored / failed to build -; error = ./artifact/benchmarks_errored.log diff --git a/devops/benchmarking/enabled_tests.conf b/devops/benchmarking/enabled_tests.conf deleted file mode 100644 index 20659cbea636d..0000000000000 --- a/devops/benchmarking/enabled_tests.conf +++ /dev/null @@ -1,8 +0,0 @@ -# Test cases to be enabled: -api_overhead_benchmark_sycl -memory_benchmark_sycl -miscellaneous_benchmark_sycl -ulls_benchmark_sycl - -# As of January 2025, these are every compute-benchmark tests with a SYCL -# implementation. 
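For readers following the removal above: config.ini, constants.ini and enabled_tests.conf drove the old pass/fail logic in compare.py, which measured each recorded metric of a fresh compute-benchmarks run against a historical median and failed the run when the metric drifted past the configured tolerance (Median:0.08, i.e. roughly +/-8%). The sketch below is illustrative only; it assumes a relative-deviation reading of the tolerance value, and check_tolerance, TOLERANCES and MIN_SAMPLES are hypothetical names rather than the deleted compare.py implementation.

# Illustrative sketch of the removed tolerance check; not the deleted code.
import statistics

# Mirrors the removed config.ini: allowed relative deviation per recorded
# metric (Median:0.08 ~= +/-8%) and the [average] min_threshold sample count.
TOLERANCES = {"Median": 0.08}
MIN_SAMPLES = 10

def check_tolerance(metric: str, new_value: float, history: list[float]) -> bool:
    """Return True if new_value is within tolerance of the historical median."""
    if len(history) < MIN_SAMPLES:
        # Too few samples to form a trustworthy average: skip the check.
        return True
    hist_avg = statistics.median(history)
    allowed = TOLERANCES[metric] * hist_avg
    return abs(new_value - hist_avg) <= allowed

# Example: 5.6 is within 8% of a historical median of 5.3, so the check passes.
print(check_tolerance("Median", 5.6,
                      [5.2, 5.3, 5.4, 5.1, 5.3, 5.2, 5.5, 5.3, 5.4, 5.2]))

The MIN_SAMPLES guard mirrors the [average] section of the removed config: with fewer than min_threshold historical results the comparison is skipped rather than failed, matching the "not enough samples" path in the deleted benchmark.sh.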
diff --git a/devops/containers/ubuntu2204_build.Dockerfile b/devops/containers/ubuntu2204_build.Dockerfile index 1aa814aaa775c..ff26dc7761772 100644 --- a/devops/containers/ubuntu2204_build.Dockerfile +++ b/devops/containers/ubuntu2204_build.Dockerfile @@ -22,7 +22,7 @@ RUN mkdir --parents --mode=0755 /etc/apt/keyrings RUN wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | \ gpg --dearmor | tee /etc/apt/keyrings/rocm.gpg > /dev/null && \ # Add rocm repo -echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/6.1.1 jammy main" \ +echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/6.3 jammy main" \ | tee --append /etc/apt/sources.list.d/rocm.list && \ printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | tee /etc/apt/preferences.d/rocm-pin-600 # Install the kernel driver diff --git a/devops/containers/ubuntu2204_intel_drivers.Dockerfile b/devops/containers/ubuntu2204_intel_drivers.Dockerfile index b27aee1b633b0..d8eb12fa5c82f 100644 --- a/devops/containers/ubuntu2204_intel_drivers.Dockerfile +++ b/devops/containers/ubuntu2204_intel_drivers.Dockerfile @@ -5,8 +5,6 @@ FROM $base_image:$base_tag ENV DEBIAN_FRONTEND=noninteractive -ARG use_unstable_driver=true - USER root RUN apt update && apt install -yqq wget @@ -18,12 +16,7 @@ COPY dependencies.json / RUN mkdir /runtimes ENV INSTALL_LOCATION=/runtimes RUN --mount=type=secret,id=github_token \ - if [ "$use_unstable_driver" = "true" ]; then \ - install_driver_opt=" --use-latest"; \ - else \ - install_driver_opt=" dependencies.json"; \ - fi && \ - GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh $install_driver_opt --all + GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh dependencies.json --all COPY scripts/drivers_entrypoint.sh /drivers_entrypoint.sh diff --git a/devops/containers/ubuntu2404_base.Dockerfile b/devops/containers/ubuntu2404_base.Dockerfile index 3cdad5b74366e..6890a42dcc95a 100644 --- a/devops/containers/ubuntu2404_base.Dockerfile +++ b/devops/containers/ubuntu2404_base.Dockerfile @@ -4,6 +4,11 @@ ENV DEBIAN_FRONTEND=noninteractive USER root +# Configure LLVM nightly repo +RUN apt-get update -qq && apt-get install --no-install-recommends -yqq curl ca-certificates +RUN curl -sSL https://apt.llvm.org/llvm-snapshot.gpg.key -o /etc/apt/trusted.gpg.d/apt.llvm.org.asc +RUN echo 'deb http://apt.llvm.org/noble/ llvm-toolchain-noble main' > /etc/apt/sources.list.d/llvm.list + # Install SYCL prerequisites COPY scripts/install_build_tools.sh /install.sh RUN /install.sh diff --git a/devops/containers/ubuntu2404_intel_drivers.Dockerfile b/devops/containers/ubuntu2404_intel_drivers.Dockerfile index a0970f3900141..b61a2a5c10162 100644 --- a/devops/containers/ubuntu2404_intel_drivers.Dockerfile +++ b/devops/containers/ubuntu2404_intel_drivers.Dockerfile @@ -5,8 +5,6 @@ FROM $base_image:$base_tag ENV DEBIAN_FRONTEND=noninteractive -ARG use_unstable_driver=true - USER root RUN apt update && apt install -yqq wget @@ -18,12 +16,7 @@ COPY dependencies.json / RUN mkdir /runtimes ENV INSTALL_LOCATION=/runtimes RUN --mount=type=secret,id=github_token \ - if [ "$use_unstable_driver" = "true" ]; then \ - install_driver_opt=" --use-latest"; \ - else \ - install_driver_opt=" dependencies.json"; \ - fi && \ - GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh $install_driver_opt --all + GITHUB_TOKEN=$(cat /run/secrets/github_token) /install_drivers.sh dependencies.json --all COPY scripts/drivers_entrypoint.sh 
/drivers_entrypoint.sh diff --git a/devops/dependencies-igc-dev.json b/devops/dependencies-igc-dev.json index 0f1ce44811665..14820d082a23b 100644 --- a/devops/dependencies-igc-dev.json +++ b/devops/dependencies-igc-dev.json @@ -1,10 +1,10 @@ { "linux": { "igc_dev": { - "github_tag": "igc-dev-a9e1ef2", - "version": "a9e1ef2", - "updated_at": "2025-03-09T09:38:44Z", - "url": "https://api.github.com/repos/intel/intel-graphics-compiler/actions/artifacts/2717684926/zip", + "github_tag": "igc-dev-af17e0f", + "version": "af17e0f", + "updated_at": "2025-04-09T23:37:41Z", + "url": "https://api.github.com/repos/intel/intel-graphics-compiler/actions/artifacts/2915515592/zip", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" } } diff --git a/devops/dependencies.json b/devops/dependencies.json index 1096ddfc9094b..605fcedd72c85 100644 --- a/devops/dependencies.json +++ b/devops/dependencies.json @@ -1,49 +1,40 @@ { "linux": { "compute_runtime": { - "github_tag": "25.13.33276.19", - "version": "25.13.33276.19", - "url": "https://github.com/intel/compute-runtime/releases/tag/25.13.33276.19", + "github_tag": "25.18.33578.6", + "version": "25.18.33578.6", + "url": "https://github.com/intel/compute-runtime/releases/tag/25.18.33578.6", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "igc": { - "github_tag": "v2.10.10", - "version": "v2.10.10", - "url": "https://github.com/intel/intel-graphics-compiler/releases/tag/v2.10.10", + "github_tag": "v2.11.7", + "version": "v2.11.7", + "url": "https://github.com/intel/intel-graphics-compiler/releases/tag/v2.11.7", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "cm": { - "github_tag": "cmclang-1.0.144", - "version": "1.0.144", - "url": "https://github.com/intel/cm-compiler/releases/tag/cmclang-1.0.144", + "github_tag": "cmclang-1.0.119", + "version": "1.0.119", + "url": "https://github.com/intel/cm-compiler/releases/tag/cmclang-1.0.119", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "level_zero": { - "github_tag": "v1.20.2", - "version": "v1.20.2", - "url": "https://github.com/oneapi-src/level-zero/releases/tag/v1.20.2", + "github_tag": "v1.21.9", + "version": "v1.21.9", + "url": "https://github.com/oneapi-src/level-zero/releases/tag/v1.21.9", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclgpu" }, "tbb": { - "github_tag": "v2022.0.0", - "version": "2022.0.0", - "url": "https://github.com/oneapi-src/oneTBB/releases/download/v2022.0.0/oneapi-tbb-2022.0.0-lin.tgz", + "github_tag": "v2022.1.0", + "version": "2022.1.0", + "url": "https://github.com/uxlfoundation/oneTBB/releases/download/v2022.1.0/oneapi-tbb-2022.1.0-lin.tgz", "root": "{DEPS_ROOT}/tbb/lin" }, "oclcpu": { - "github_tag": "2024-WW43", - "version": "2024.18.10.0.08", - "url": "https://github.com/intel/llvm/releases/download/2024-WW43/oclcpuexp-2024.18.10.0.08_rel.tar.gz", + "github_tag": "2025-WW13", + "version": "2025.19.3.0.17_230222", + "url": "https://github.com/intel/llvm/releases/download/2025-WW13/oclcpuexp-2025.19.3.0.17_230222_rel.tar.gz", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclcpu" - }, - "fpgaemu": { - "github_tag": "2024-WW43", - "version": "2024.18.10.0.08", - "url": "https://github.com/intel/llvm/releases/download/2024-WW43/fpgaemu-2024.18.10.0.08_rel.tar.gz", - "root": "{DEPS_ROOT}/opencl/runtime/linux/oclfpgaemu" - }, - "fpga": { - "root": "{ARCHIVE_ROOT}/comp/oclfpga/linux" } }, "windows": { @@ -53,25 +44,16 @@ "root": "" }, "tbb": { - "github_tag": "v2022.0.0", - "version": "2022.0.0", - "url": 
"https://github.com/oneapi-src/oneTBB/releases/download/v2022.0.0/oneapi-tbb-2022.0.0-win.zip", + "github_tag": "v2022.1.0", + "version": "2022.1.0", + "url": "https://github.com/uxlfoundation/oneTBB/releases/download/v2022.1.0/oneapi-tbb-2022.1.0-win.zip", "root": "{DEPS_ROOT}/tbb/win" }, "oclcpu": { - "github_tag": "2024-WW43", - "version": "2024.18.10.0.08", - "url": "https://github.com/intel/llvm/releases/download/2024-WW43/win-oclcpuexp-2024.18.10.0.08_rel.zip", + "github_tag": "2025-WW13", + "version": "2025.19.3.0.17_230222", + "url": "https://github.com/intel/llvm/releases/download/2025-WW13/win-oclcpuexp-2025.19.3.0.17_230222_rel.zip", "root": "{DEPS_ROOT}/opencl/runtime/linux/oclcpu" - }, - "fpgaemu": { - "github_tag": "2024-WW43", - "version": "2024.18.10.0.08", - "url": "https://github.com/intel/llvm/releases/download/2024-WW43/win-fpgaemu-2024.18.10.0.08_rel.zip", - "root": "{DEPS_ROOT}/opencl/runtime/linux/oclfpgaemu" - }, - "fpga": { - "root": "{ARCHIVE_ROOT}/comp/oclfpga/win" } } } diff --git a/devops/scripts/benchmarking/aggregate.py b/devops/scripts/benchmarking/aggregate.py deleted file mode 100644 index f62a8ffed83c5..0000000000000 --- a/devops/scripts/benchmarking/aggregate.py +++ /dev/null @@ -1,205 +0,0 @@ -import csv -import sys -from pathlib import Path -import heapq -import statistics -from common import Validate, SanitizedConfig -from abc import ABC, abstractmethod -import os - - -class Aggregator(ABC): - """ - Aggregator classes used to "aggregate" a pool of elements, and produce an - "average" (precisely, some "measure of central tendency") from the elements. - """ - - @staticmethod - @abstractmethod - def get_type() -> str: - """ - Return a string indicating the type of average this aggregator - produces. - """ - pass - - @abstractmethod - def add(self, n: float): - """ - Add/aggregate an element to the pool of elements used by this aggregator - to produce an average calculation. - """ - pass - - @abstractmethod - def get_avg(self) -> float: - """ - Produce an average from the pool of elements aggregated using add(). - """ - pass - - -class SimpleMedian(Aggregator): - """ - Simple median calculation: if the number of samples being generated are low, - this is the fastest median method. - """ - - def __init__(self): - self.elements = [] - - @staticmethod - def get_type() -> str: - return "median" - - def add(self, n: float): - self.elements.append(n) - - def get_avg(self) -> float: - return statistics.median(self.elements) - - -class StreamingMedian(Aggregator): - """ - Calculate medians incrementally using heaps: Theoretically the fastest way - to calculate a median from a stream of elements, but realistically is only - faster when dealing with huge numbers of samples that would be generated by - i.e. enabling this workflow in precommit and using longer periods of time. - """ - - def __init__(self): - # Gist: we keep a minheap and a maxheap, and store the median as the top - # of the minheap. When a new element comes it gets put into the heap - # based on if the element is bigger than the current median. Then, the - # heaps are heapified and the median is repopulated by heapify. 
- self.minheap_larger = [] - self.maxheap_smaller = [] - - @staticmethod - def get_type() -> str: - return "median" - - # Note: numbers on maxheap should be negative, as heapq - # is minheap by default - - def add(self, n: float): - if len(self.maxheap_smaller) == 0 or -self.maxheap_smaller[0] >= n: - heapq.heappush(self.maxheap_smaller, -n) - else: - heapq.heappush(self.minheap_larger, n) - - # Ensure minheap has more elements than maxheap - if len(self.maxheap_smaller) > len(self.minheap_larger) + 1: - heapq.heappush(self.minheap_larger, -heapq.heappop(self.maxheap_smaller)) - elif len(self.maxheap_smaller) < len(self.minheap_larger): - heapq.heappush(self.maxheap_smaller, -heapq.heappop(self.minheap_larger)) - - def get_avg(self) -> float: - if len(self.maxheap_smaller) == len(self.minheap_larger): - # Equal number of elements smaller and larger than "median": - # thus, there are two median values. The median would then become - # the average of both median values. - return (-self.maxheap_smaller[0] + self.minheap_larger[0]) / 2.0 - else: - # Otherwise, median is always in minheap, as minheap is always - # bigger - return -self.maxheap_smaller[0] - - -class Aggregate: - """ - Static class providing methods for aggregating data - """ - - @staticmethod - def hist_avg( - benchmark_name: str, res_dir: str, cutoff: str, aggregator=SimpleMedian - ): - if not os.path.isdir(res_dir): - print(f"Not a directory: {res_dir}.", file=sys.stderr) - exit(1) - - def get_csv_samples() -> list[str]: - """Get all valid .csv samples from the results folder.""" - cache_dir = Path(f"{res_dir}") - # Filter all benchmark .csv files in the result directory: - return list( - filter( - # Make sure the .csv "file" is a file: - lambda f: f.is_file() - # Make sure timestamp of .csv file is good format: - # [-19:-4] corresponds to the timestamp in the filename. - and Validate.timestamp(str(f)[-19:-4]) - # Make sure timestamp is bigger than cutoff timestamp: - and str(f)[-19:-4] > cutoff, - cache_dir.glob(f"{benchmark_name}-*_*.csv"), - ) - ) - - # Calculate median of every desired metric: - samples_aggregate = dict() - filtered_samples = get_csv_samples() - if len(filtered_samples) == 0: - print( - f"WARNING: No results for {benchmark_name} found from {cutoff} to now", - file=sys.stderr, - ) - for sample_path in filtered_samples: - with open(sample_path, "r") as sample_file: - for sample in csv.DictReader(sample_file): - test = sample["TestCase"] - # Construct entry in aggregator for test if it doesn't exist - # already: - if test not in samples_aggregate: - samples_aggregate[test] = { - metric: aggregator() - for metric in SanitizedConfig.METRICS_TOLERANCES - } - - # For each metric of concern, add to aggregator: - for metric in SanitizedConfig.METRICS_TOLERANCES: - sample_value = Validate.sanitize_stat(sample[metric]) - if not isinstance(sample_value, float): - print( - f"Malformatted statistic in {str(sample_path)}: " - + f"'{sample[metric]}' for {test}." 
- ) - exit(1) - # Add metric from sample for current test to aggregate: - samples_aggregate[test][metric].add(sample_value) - - # Calculate + write new average (from samples_aggregate) in new .csv file: - with open( - f"{res_dir}/{benchmark_name}-{aggregator.get_type()}.csv", "w" - ) as output_csv: - writer = csv.DictWriter( - output_csv, - fieldnames=["TestCase", *SanitizedConfig.METRICS_TOLERANCES.keys()], - ) - writer.writeheader() - for test in samples_aggregate: - writer.writerow( - {"TestCase": test} - | { - metric: samples_aggregate[test][metric].get_avg() - for metric in SanitizedConfig.METRICS_TOLERANCES - } - ) - - -if __name__ == "__main__": - if len(sys.argv) != 5: - print( - f"Usage: {sys.argv[0]} " - ) - exit(1) - if not Validate.timestamp(sys.argv[4]): - print(f"Bad cutoff timestamp, please use YYYYMMDD_HHMMSS.", file=sys.stderr) - exit(1) - if not Validate.filepath(sys.argv[1]): - print(f"Not a valid filepath: {sys.argv[1]}", file=sys.stderr) - exit(1) - # If the filepath provided passed filepath validation, then it is clean - SanitizedConfig.load(sys.argv[1]) - - Aggregate.hist_avg(sys.argv[2], sys.argv[3], sys.argv[4]) diff --git a/devops/scripts/benchmarking/benchmark.sh b/devops/scripts/benchmarking/benchmark.sh deleted file mode 100755 index bbfd669774f9a..0000000000000 --- a/devops/scripts/benchmarking/benchmark.sh +++ /dev/null @@ -1,300 +0,0 @@ -#!/bin/sh - -# -# benchmark.sh: Benchmark dpcpp using compute-benchmarks -# - -usage () { - >&2 echo "Usage: $0 -t [-B ] - -n Github runner name -- Required - -c Clean up working directory - -C Clean up working directory and exit - -s Cache results - -This script builds and runs benchmarks from compute-benchmarks." - exit 1 -} - -# Ensures test cases read from enabled_tests.conf contains no malicious content -_validate_testname () { - if [ -n "$(printf "%s" "$1" | sed "s/[a-zA-Z_]*//g")" ]; then - echo "Illegal characters in $TEST_CONFIG. Permitted characters: a-zA-Z_" - exit 1 - fi -} - -clone_perf_res() { - echo "### Cloning llvm-ci-perf-results ($SANITIZED_PERF_RES_GIT_REPO:$SANITIZED_PERF_RES_GIT_BRANCH) ###" - git clone -b "$SANITIZED_PERF_RES_GIT_BRANCH" "https://github.com/$SANITIZED_PERF_RES_GIT_REPO" ./llvm-ci-perf-results - [ "$?" -ne 0 ] && exit "$?" -} - -clone_compute_bench() { - echo "### Cloning compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###" - git clone -b "$SANITIZED_COMPUTE_BENCH_GIT_BRANCH" \ - --recurse-submodules "https://github.com/$SANITIZED_COMPUTE_BENCH_GIT_REPO" \ - ./compute-benchmarks - if [ ! -d "./compute-benchmarks" ]; then - echo "Failed to clone compute-benchmarks." - exit 1 - elif [ -n "$SANITIZED_COMPUTE_BENCH_GIT_COMMIT" ]; then - cd ./compute-benchmarks - git checkout "$SANITIZED_COMPUTE_BENCH_GIT_COMMIT" - if [ "$?" -ne 0 ]; then - echo "Failed to get compute-benchmarks commit '$SANITIZED_COMPUTE_BENCH_GIT_COMMIT'." - exit 1 - fi - cd - - fi -} - -build_compute_bench() { - echo "### Building compute-benchmarks ($SANITIZED_COMPUTE_BENCH_GIT_REPO:$SANITIZED_COMPUTE_BENCH_GIT_BRANCH) ###" - mkdir ./compute-benchmarks/build && cd ./compute-benchmarks/build && - # No reason to turn on ccache, if this docker image will be disassembled later on - cmake .. -DBUILD_SYCL=ON -DBUILD_L0=OFF -DBUILD=OCL=OFF -DCCACHE_ALLOWED=FALSE - # TODO enable mechanism for opting into L0 and OCL -- the concept is to - # subtract OCL/L0 times from SYCL times in hopes of deriving SYCL runtime - # overhead, but this is mostly an idea that needs to be mulled upon. 
- - if [ "$?" -eq 0 ]; then - while IFS= read -r case; do - # Skip lines starting with '#' - [ "${case##\#*}" ] || continue - - _validate_testname "$case" - make "-j$SANITIZED_COMPUTE_BENCH_COMPILE_JOBS" "$case" - done < "$TESTS_CONFIG" - fi - cd - -} - -# Check if the number of samples for a given test case is less than a threshold -# set in benchmark-ci.conf -# -# Usage: -samples_under_threshold () { - # Directory doesn't exist, samples automatically under threshold - [ ! -d "./llvm-ci-perf-results/$1" ] && return 0 - file_count="$(find "./llvm-ci-perf-results/$1" -maxdepth 1 -type f | wc -l )" - [ "$file_count" -lt "$SANITIZED_AVERAGE_MIN_THRESHOLD" ] -} - -# Check for a regression via compare.py -# -# Usage: check_regression -check_regression() { - csv_relpath="$(dirname "$1")" - csv_name="$(basename "$1")" - if samples_under_threshold "$csv_relpath"; then - echo "Not enough samples to construct a good average, performance\ - check skipped!" - return 0 # Success status - fi - python "$DEVOPS_PATH/scripts/benchmarking/compare.py" \ - "$DEVOPS_PATH" "$csv_relpath" "$csv_name" - return $? -} - -# Move the results of our benchmark into the git repo, and save benchmark -# results to artifact archive -# -# Usage: cache -cache() { - mkdir -p "$(dirname ./artifact/passing_tests/$1)" "$(dirname ./artifact/failed_tests/$1)" - cp "./artifact/failed_tests/$1" "./artifact/passing_tests/$1" - mkdir -p "$(dirname ./llvm-ci-perf-results/$1)" - mv "./artifact/failed_tests/$1" "./llvm-ci-perf-results/$1" -} - -# Check for a regression + cache if no regression found -# -# Usage: check_and_cache -check_and_cache() { - echo "Checking $1..." - if check_regression $1; then - if [ "$CACHE_RESULTS" -eq "1" ]; then - echo "Caching $1..." - cache $1 - fi - else - [ "$CACHE_RESULTS" -eq "1" ] && echo "Regression found -- Not caching!" - fi -} - -# Run and process the results of each enabled benchmark in enabled_tests.conf -process_benchmarks() { - echo "### Running and processing selected benchmarks ###" - if [ -z "$TESTS_CONFIG" ]; then - echo "Setting tests to run via cli is not currently supported." - exit 1 - else - rm ./artifact/benchmarks_errored.log ./artifact/benchmarks_failed.log 2> /dev/null - mkdir -p ./artifact - # Loop through each line of enabled_tests.conf, but ignore lines in the - # test config starting with #'s: - grep "^[^#]" "$TESTS_CONFIG" | while read -r testcase; do - _validate_testname "$testcase" - echo "# Running $testcase..." - - # The benchmark results git repo and this script's output both share - # the following directory structure: - # - # /// - # - # Instead of specifying 2 paths with a slightly different root - # folder name for every function we use, we can use a relative path - # to represent the file in both folders. - # - # Figure out the relative path of our testcase result: - test_dir_relpath="$DEVICE_SELECTOR_DIRNAME/$RUNNER/$testcase" - output_csv_relpath="$test_dir_relpath/$testcase-$TIMESTAMP.csv" - mkdir -p "./artifact/failed_tests/$test_dir_relpath" # Ensure directory exists - - # Tests are first placed in ./artifact/failed_tests, and are only - # moved to passing_tests or the performance results repo if the - # benchmark results are passing - output_csv="./artifact/failed_tests/$output_csv_relpath" - "./compute-benchmarks/build/bin/$testcase" --csv \ - --iterations="$SANITIZED_COMPUTE_BENCH_ITERATIONS" > "$output_csv" - - exit_status="$?" 
- if [ "$exit_status" -eq 0 ] && [ -s "$output_csv" ]; then - # Filter out header lines not in csv format: - tail +8 "$output_csv" > .tmp_res - mv .tmp_res "$output_csv" - check_and_cache $output_csv_relpath - else - echo "[ERROR] $testcase returned exit status $exit_status" - echo "-- $testcase: error $exit_status" >> ./artifact/benchmarks_errored.log - fi - done - fi -} - -# Handle failures + produce a report on what failed -process_results() { - fail=0 - if [ -s ./artifact/benchmarks_failed.log ]; then - printf "\n### Tests performing over acceptable range of average: ###\n" - cat ./artifact/benchmarks_failed.log - echo "" - fail=2 - fi - if [ -s ./artifact/benchmarks_errored.log ]; then - printf "\n### Tests that failed to run: ###\n" - cat ./artifact/benchmarks_errored.log - echo "" - fail=1 - fi - exit $fail -} - -cleanup() { - echo "### Cleaning up compute-benchmark builds from prior runs ###" - rm -rf ./compute-benchmarks - rm -rf ./llvm-ci-perf-results - [ ! -z "$_exit_after_cleanup" ] && exit -} - -load_configs() { - # This script needs to know where the intel/llvm "/devops" directory is, - # containing all the configuration files and the compare script. - # - # If this is not provided, this function tries to guess where the files - # are based on how the script is called, and verifies that all necessary - # configs and scripts are reachable. - - # This benchmarking script is usually at: - # - # /devops/scripts/benchmarking/benchmark.sh - # - # Derive /devops based on location of this script: - [ -z "$DEVOPS_PATH" ] && DEVOPS_PATH="$(dirname "$0")/../.." - if [ -z "$(printf '%s' "$DEVOPS_PATH" | grep -oE '^[a-zA-Z0-9._\/-]+$')" ]; then - echo "Bad DEVOPS_PATH, please specify DEVOPS_PATH variable." - exit 1 - fi - - TESTS_CONFIG="$(realpath "$DEVOPS_PATH/benchmarking/enabled_tests.conf")" - COMPARE_PATH="$(realpath "$DEVOPS_PATH/scripts/benchmarking/compare.py")" - LOAD_CONFIG_PY="$(realpath "$DEVOPS_PATH/scripts/benchmarking/load_config.py")" - - for file in \ - "$TESTS_CONFIG" "$COMPARE_PATH" "$LOAD_CONFIG_PY" - do - if [ ! -f "$file" ]; then - echo "Please provide path to /devops in DEVOPS_PATH." - exit -1 - fi - done - - $(python "$LOAD_CONFIG_PY" "$DEVOPS_PATH" config) - $(python "$LOAD_CONFIG_PY" "$DEVOPS_PATH" constants) -} - -##### - -load_configs - -COMPUTE_BENCH_COMPILE_FLAGS="" -CACHE_RESULTS="0" -# Timestamp format is YYYYMMDD_HHMMSS -TIMESTAMP="$(date +%Y%m%d_%H%M%S)" - -# CLI flags + overrides to configuration options: -while getopts "n:cCs" opt; do - case "$opt" in - n) - if [ -n "$(printf "%s" "$OPTARG" | sed "s/[a-zA-Z0-9_-]*//g")" ]; then - echo "Illegal characters in runner name." - exit 1 - fi - RUNNER="$OPTARG" - ;; - # Cleanup status is saved in a var to ensure all arguments are processed before - # performing cleanup - c) _cleanup=1 ;; - C) _cleanup=1 && _exit_after_cleanup=1 ;; - s) CACHE_RESULTS=1;; - \?) usage ;; - esac -done - -# Check all necessary variables exist: -if [ -z "$CMPLR_ROOT" ]; then - echo "Please set CMPLR_ROOT first; it is needed by compute-benchmarks to build." - exit 1 -elif [ -z "$ONEAPI_DEVICE_SELECTOR" ]; then - echo "Please set ONEAPI_DEVICE_SELECTOR first to specify which device to use." - exit 1 -elif [ -z "$RUNNER" ]; then - echo "Please specify runner name using -n first; it is needed for storing/comparing benchmark results." 
- exit 1 -fi - -# Make sure ONEAPI_DEVICE_SELECTOR doesn't try to enable multiple devices at the -# same time, or use specific device id's -_dev_sel_backend_re="$(echo "$SANITIZED_DEVICE_SELECTOR_ENABLED_BACKENDS" | sed 's/,/|/g')" -_dev_sel_device_re="$(echo "$SANITIZED_DEVICE_SELECTOR_ENABLED_DEVICES" | sed 's/,/|/g')" -_dev_sel_re="s/($_dev_sel_backend_re):($_dev_sel_device_re)//" -if [ -n "$(echo "$ONEAPI_DEVICE_SELECTOR" | sed -E "$_dev_sel_re")" ]; then - echo "Unsupported ONEAPI_DEVICE_SELECTOR value: please ensure only one \ -device is selected, and devices are not selected by indices." - echo "Enabled backends: $SANITIZED_DEVICE_SELECTOR_ENABLED_BACKENDS" - echo "Enabled device types: $SANITIZED_DEVICE_SELECTOR_ENABLED_DEVICES" - exit 1 -fi -# ONEAPI_DEVICE_SELECTOR values are not valid directory names in unix: this -# value lets us use ONEAPI_DEVICE_SELECTOR as actual directory names -DEVICE_SELECTOR_DIRNAME="$(echo "$ONEAPI_DEVICE_SELECTOR" | sed 's/:/-/')" - -# Clean up and delete all cached files if specified: -[ ! -z "$_cleanup" ] && cleanup -# Clone and build only if they aren't already cached/deleted: -[ ! -d ./llvm-ci-perf-results ] && clone_perf_res -[ ! -d ./compute-benchmarks ] && clone_compute_bench -[ ! -d ./compute-benchmarks/build ] && build_compute_bench -# Process benchmarks: -process_benchmarks -process_results \ No newline at end of file diff --git a/devops/scripts/benchmarking/common.py b/devops/scripts/benchmarking/common.py deleted file mode 100644 index c400b686db90f..0000000000000 --- a/devops/scripts/benchmarking/common.py +++ /dev/null @@ -1,196 +0,0 @@ -import re -import os -import sys -import string -import configparser - - -class Validate: - """Static class containing methods for validating various fields""" - - @staticmethod - def filepath(path: str) -> bool: - """ - Returns True if path is clean (no illegal characters), otherwise False. - """ - filepath_re = re.compile(r"[a-zA-Z0-9\/\._\-]+") - return filepath_re.match(path) is not None - - @staticmethod - def timestamp(t: str) -> bool: - """ - Returns True if t is in form YYYYMMDD_HHMMSS, otherwise False. - """ - timestamp_re = re.compile( - r"^\d{4}(0[1-9]|1[0-2])([0-2][0-9]|3[01])_([01][0-9]|2[0-3])[0-5][0-9][0-5][0-9]$" - ) - return timestamp_re.match(t) is not None - - @staticmethod - def sanitize_stat(stat: str) -> float: - """ - Sanitize statistics found in compute-benchmark output csv files. Returns - float if sanitized, None if not sanitizable. - """ - # Get rid of % - if stat[-1] == "%": - stat = stat[:-1] - - # Cast to float: If cast succeeds, the statistic is clean. - try: - return float(stat) - except ValueError: - return None - - -class SanitizedConfig: - """ - Static class for holding sanitized configuration values used within python. - - Configuration option names follow
_